diff --git a/CHANGELOG.md b/CHANGELOG.md index d7f7b3bcc..687147663 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,44 @@ +## v1.8.0 (28 Feb 2023) + +**Features** + +**Nutanix Database Service (Formerly Era)** + + - Ansible module for clusters info + - Ansible module for clusters + - Ansible module for vlans + - Ansible module for vlans info + - Ansible module for stretched vlans + - Ansible module for profiles + - Ansible module for profiles info + - Ansible module for slas + - Ansible module for slas info + - Ansible module for tags + - Ansible module for database instances + - Ansible module for database instance registration + - Ansible module for database instances info + - Ansible module for database server vms + - Ansible module for database server vms info + - Ansible module for database server vm registration + - Ansible module for time machine clusters + - Ansible module for time machines info + - Ansible module for authorization of database server vm with time machines + - Ansible module for database clones + - Ansible module for database clones info + - Ansible module for database clones refresh + - Ansible module for snapshots info + - Ansible module for database snapshots + - Ansible module for replicating database snapshots + - Ansible module for log catchups + - Ansible module for database restore + - Ansible module for database scale + - Ansible module for linked databases + - Ansible module for maintenance windows + - Ansible module for maintenance windows info + - Ansible module for maintenance tasks + +**Full Changelog:** [here](https://github.com/nutanix/nutanix.ansible/compare/v1.7.0...v1.8.0) + ## v1.8.0-beta.1 (20 Oct 2022) **Features** diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 52490f58a..a563534d7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,20 +5,44 @@ Nutanix.Ncp Release Notes .. contents:: Topics -v1.8.0-beta.1 -============= +v1.8.0 +====== New Modules ----------- +- ntnx_ndb_authorize_db_server_vms - module for authorizing db server vm - ntnx_ndb_clones_info - info module for database clones +- ntnx_ndb_clusters - Create, Update and Delete NDB clusters - ntnx_ndb_clusters_info - info module for ndb clusters +- ntnx_ndb_database_clone_refresh - module for database clone refresh. +- ntnx_ndb_database_clones - module for create, update and delete of ndb database clones +- ntnx_ndb_database_log_catchup - module for performing log catchup actions +- ntnx_ndb_database_restore - module for restoring database instance +- ntnx_ndb_database_scale - module for scaling database instance +- ntnx_ndb_database_snapshots - module for creating, updating and deleting database snapshots - ntnx_ndb_databases - Module for create, update and delete of single instance database. Currently, postgres type database is officially supported.
- ntnx_ndb_databases_info - info module for ndb database instances +- ntnx_ndb_db_server_vms - module for create, delete and update of database server vms - ntnx_ndb_db_servers_info - info module for ndb db server vms +- ntnx_ndb_linked_databases - module to manage linked databases of a database instance +- ntnx_ndb_maintenance_tasks - module to add and remove maintenance related tasks +- ntnx_ndb_maintenance_window - module to create, update and delete maintenance window +- ntnx_ndb_maintenance_windows_info - module for fetching maintenance windows info +- ntnx_ndb_profiles - module for create, update and delete of profiles - ntnx_ndb_profiles_info - info module for ndb profiles +- ntnx_ndb_register_database - module for database instance registration +- ntnx_ndb_register_db_server_vm - module for registration of database server vm +- ntnx_ndb_replicate_database_snapshots - module for replicating database snapshots across clusters of time machine +- ntnx_ndb_slas - module for creating, updating and deleting slas - ntnx_ndb_slas_info - info module for ndb slas +- ntnx_ndb_snapshots_info - info module for ndb snapshots +- ntnx_ndb_stretched_vlans - Module for create, update and delete of stretched vlan. +- ntnx_ndb_tags - module for create, update and delete of tags +- ntnx_ndb_time_machine_clusters - Module for create, update and delete for data access management in time machines. - ntnx_ndb_time_machines_info - info module for ndb time machines +- ntnx_ndb_vlans - Module for create, update and delete of ndb vlan. +- ntnx_ndb_vlans_info - info module for ndb vlans v1.7.0 ====== diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 74e869513..6990e0540 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -54,9 +54,7 @@ further defined and clarified by project maintainers. ## Enforcement -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at build@rubrik.com. All -complaints will be reviewed and investigated and will result in a response that +All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. diff --git a/README.md b/README.md index 06cebdaee..ea8f44acd 100644 --- a/README.md +++ b/README.md @@ -61,12 +61,12 @@ Karbon based examples : https://github.com/nutanix/nutanix.ansible/tree/main/exa ## Nutanix Database Service (ERA) > For the 1.8.0-beta.1 release of the ansible plugin, it will have N-1 compatibility with the Nutanix Database Service (ERA). This release was tested against era versions v2.4.1 and v2.4.0 -NDB based examples : https://github.com/nutanix/nutanix.ansible/tree/main/examples/ndb +> For the 1.8.0 release of the ansible plugin, it will have N-1 compatibility with the Nutanix Database Service (ERA). This release was tested against era versions v2.5.0 and v2.5.1 -Nutanix Ansible support for Nutanix Database Service is currently at beta stage. +NDB based examples : https://github.com/nutanix/nutanix.ansible/tree/main/examples/ndb ### Notes: -1. Currently for ntnx_ndb_databases, creation of only postgres type database instance is tested and offically supported. +1. Currently NDB based modules are supported and tested against postgres based databases.
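For a quick sanity check of the NDB connection before using these modules, an info module can be pointed at the NDB (era) server. A minimal sketch (host and credential values are placeholders; it assumes the info module returns the instance list under `response`, as the db server examples in this repository do):

```yaml
---
- name: Verify NDB connectivity
  hosts: localhost
  gather_facts: false
  collections:
    - nutanix.ncp
  tasks:
    - name: List database instances
      ntnx_ndb_databases_info:
        nutanix_host: "<ndb-server-ip>"
        nutanix_username: "<username>"
        nutanix_password: "<password>"
        validate_certs: false
      register: databases

    - name: Show instance count
      debug:
        msg: "Found {{ databases.response | length }} database instances"
```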
# Installing the collection **Prerequisite** @@ -152,14 +152,6 @@ ansible-playbook examples/iaas/iaas.yml | ntnx_karbon_clusters_info | Get clusters info. | | ntnx_karbon_registries | Create, Delete a karbon private registry entry | | ntnx_karbon_registries_info | Get karbon private registry registry info. | -| ntnx_ndb_databases | Create, Update and Delete single instance database. | -| ntnx_ndb_databases_info | Get database info. | -| ntnx_ndb_db_servers_info | Get db servers vm info. | -| ntnx_ndb_clusters_info | Get clusters info. | -| ntnx_ndb_slas_info | Get slas info | -| ntnx_ndb_profiles_info | Get profiles info. | -| ntnx_ndb_time_machines_info | Get time machines info. | -| ntnx_ndb_clones_info | Get database clones info. | | ntnx_pbrs | Create or delete a PBR. | | ntnx_pbrs_info | List existing PBRs. | | ntnx_permissions_info | List permissions info | @@ -203,6 +195,38 @@ ansible-playbook examples/iaas/iaas.yml | ntnx_foundation_central_api_keys_info | List all the api keys created in Foundation Central. | | ntnx_foundation_central_imaged_clusters_info | List all the clusters created using Foundation Central. | | ntnx_foundation_central_imaged_nodes_info | List all the nodes registered with Foundation Central. | +| ntnx_ndb_databases_info | Get ndb database instance info | +| ntnx_ndb_clones_info | Get ndb database clones info. | +| ntnx_ndb_time_machines_info | Get ndb time machines info. | +| ntnx_ndb_profiles_info | Get ndb profiles info. | +| ntnx_ndb_db_servers_info | Get ndb database server vms info. | +| ntnx_ndb_databases | Create, update and delete database instances. | +| ntnx_ndb_register_database | Register database instance. | +| ntnx_ndb_db_server_vms | Create, update and delete database server vms. | +| ntnx_ndb_clusters_info | Get clusters info. 
| +| ntnx_ndb_clusters | Create, update and delete clusters in NDB | +| ntnx_ndb_snapshots_info | Get snapshots info | +| ntnx_ndb_vlans | Create, update and delete vlans | +| ntnx_ndb_vlans_info | Get vlans info in NDB | +| ntnx_ndb_stretched_vlans | Create, update and delete stretched vlans in NDB | +| ntnx_ndb_time_machine_clusters | Manage clusters in NDB time machines | +| ntnx_ndb_tags | Create, update and delete tags | +| ntnx_ndb_database_clones | Create, update and delete database clones | +| ntnx_ndb_database_snapshots | Create, update and delete database snapshots | +| ntnx_ndb_database_clone_refresh | Perform database clone refresh | +| ntnx_ndb_authorize_db_server_vms | Authorize database server vms with time machines | +| ntnx_ndb_profiles | Create, update and delete all kinds of profiles | +| ntnx_ndb_database_log_catchup | Perform log catchup | +| ntnx_ndb_database_restore | Perform database restore | +| ntnx_ndb_database_scale | Perform database scaling | +| ntnx_ndb_linked_databases | Add and remove linked databases of database instance | +| ntnx_ndb_replicate_database_snapshots | Replicate snapshots across clusters in time machines | +| ntnx_ndb_register_db_server_vm | Register database server vm | +| ntnx_ndb_maintenance_tasks | Add and remove maintenance tasks in window | +| ntnx_ndb_maintenance_window | Create, update and delete maintenance window | +| ntnx_ndb_maintenance_windows_info | Get maintenance window info | +| ntnx_ndb_slas | Create, update and delete slas | +| ntnx_ndb_slas_info | Get slas info | ## Inventory Plugins diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index dad2effb2..d8e442afb 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -282,6 +282,83 @@ releases: - nutanix.ncp.ntnx_prism_vm_inventory - [Imprv] add functionality constructed to module inventory [\#235](https://github.com/nutanix/nutanix.ansible/issues/235) release_date: '2022-09-30' + 1.8.0: + modules: + - description: module for authorizing db server vm + name: ntnx_ndb_authorize_db_server_vms + namespace: '' + - description: Create, Update and Delete NDB clusters + name: ntnx_ndb_clusters + namespace: '' + - description: module for database clone refresh.
+ name: ntnx_ndb_database_clone_refresh + namespace: '' + - description: module for create, update and delete of ndb database clones + name: ntnx_ndb_database_clones + namespace: '' + - description: module for performing log catchup actions + name: ntnx_ndb_database_log_catchup + namespace: '' + - description: module for restoring database instance + name: ntnx_ndb_database_restore + namespace: '' + - description: module for scaling database instance + name: ntnx_ndb_database_scale + namespace: '' + - description: module for creating, updating and deleting database snapshots + name: ntnx_ndb_database_snapshots + namespace: '' + - description: module for create, delete and update of database server vms + name: ntnx_ndb_db_server_vms + namespace: '' + - description: module to manage linked databases of a database instance + name: ntnx_ndb_linked_databases + namespace: '' + - description: module to add and remove maintenance related tasks + name: ntnx_ndb_maintenance_tasks + namespace: '' + - description: module to create, update and delete maintenance window + name: ntnx_ndb_maintenance_window + namespace: '' + - description: module for fetching maintenance windows info + name: ntnx_ndb_maintenance_windows_info + namespace: '' + - description: module for create, update and delete of profiles + name: ntnx_ndb_profiles + namespace: '' + - description: module for database instance registration + name: ntnx_ndb_register_database + namespace: '' + - description: module for registration of database server vm + name: ntnx_ndb_register_db_server_vm + namespace: '' + - description: module for replicating database snapshots across clusters of time + machine + name: ntnx_ndb_replicate_database_snapshots + namespace: '' + - description: module for creating, updating and deleting slas + name: ntnx_ndb_slas + namespace: '' + - description: info module for ndb snapshots + name: ntnx_ndb_snapshots_info + namespace: '' + - description: Module for create, update and delete of stretched vlan. + name: ntnx_ndb_stretched_vlans + namespace: '' + - description: module for create, update and delete of tags + name: ntnx_ndb_tags + namespace: '' + - description: Module for create, update and delete for data access management + in time machines. + name: ntnx_ndb_time_machine_clusters + namespace: '' + - description: Module for create, update and delete of ndb vlan. + name: ntnx_ndb_vlans + namespace: '' + - description: info module for ndb vlans + name: ntnx_ndb_vlans_info + namespace: '' + release_date: '2023-02-28' 1.8.0-beta.1: modules: - description: info module for database clones diff --git a/examples/ndb/README.md b/examples/ndb/README.md new file mode 100644 index 000000000..dbe51d2f3 --- /dev/null +++ b/examples/ndb/README.md @@ -0,0 +1,107 @@ +# Nutanix Database Service +The Nutanix ansible collection nutanix.ncp contains modules supporting Nutanix Database Service (NDB) features from v1.8.0 onwards. + +These modules are based on the following workflow: + +![NDB workflow](ndb_workflow.png) + +Note: Access Control updates and enabling multi-cluster in NDB are not yet supported by the nutanix ansible collection. + +## Usage +Below we discuss the usage of various NDB modules. + +First, we need to configure NDB setup details in the playbook. This can be done either by using module_defaults or in each individual task.
+ +```yaml +--- +- name: NDB operations + hosts: localhost + gather_facts: false + collections: + - nutanix.ncp + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: true + tasks: + - name: create tags + ntnx_ndb_tags: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: true +``` + +Initially, on a new NDB setup, we add clusters, vlans or stretched vlans. + +For example, a playbook task to register an AHV cluster in the NDB setup is: + +```yaml + - name: NDB cluster registration + ntnx_ndb_clusters: + name: "" + desc: "" + name_prefix: "" + cluster_ip: "" + cluster_credentials: + username: "" + password: "" + agent_network: + dns_servers: + - "" + - "" + ntp_servers: + - "" + - "" + - "" + - "" + vlan_access: + prism_vlan: + vlan_name: "" + vlan_type: "" + static_ip: "" + gateway: "" + subnet_mask: "" + storage_container: "" + register: output +``` + +Examples for adding vlans and stretched vlans can be found in the module documentation or under examples/ndb/ in our GitHub repository. + +Currently, we only support postgres type databases. + +An example playbook task to deploy a single instance postgres database in NDB is: + +```yaml + - name: Create single instance postgres database + ntnx_ndb_databases: + name: POSTGRES_DATABASE_ANSIBLE + db_params_profile: + name: DEFAULT_POSTGRES_PARAMS + db_vm: + create_new_server: + name: postgres_server_ansible + password: temp_password + cluster: + name: TempCluster + software_profile: + name: POSTGRES_10.4_OOB + network_profile: + name: DEFAULT_OOB_POSTGRESQL_NETWORK + compute_profile: + name: DEFAULT_OOB_SMALL_COMPUTE + pub_ssh_key: "" + + postgres: + listener_port: "5432" + db_name: test_ansible + db_password: db_password + db_size: 200 + + time_machine: + name: POSTGRES_DATABASE_ANSIBLE_TM + sla: + name: "None" +```
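Once an instance is provisioned, day-2 operations are driven through its time machine. As a quick illustration, a minimal sketch lifted from the day-2 examples added below (the time machine UUID is a placeholder variable):

```yaml
  - name: create snapshot with expiry
    ntnx_ndb_database_snapshots:
      name: "ansible-snapshot-1"
      time_machine_uuid: "{{ time_machine_uuid }}"
      expiry_days: 4
    register: result
```

The full set of day-2 tasks (snapshots, log catchup, restore, scaling, linked databases) is shown in examples/ndb/all_day2_actions.yml, whose diff follows.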
diff --git a/examples/ndb/all_day2_actions.yml b/examples/ndb/all_day2_actions.yml new file mode 100644 index 000000000..9320ae047 --- /dev/null +++ b/examples/ndb/all_day2_actions.yml @@ -0,0 +1,131 @@ +--- +# Summary: +# This playbook will do: +# 1. Create snapshot +# 2. Update and delete snapshots +# 3. Perform log catchup on database +# 4. Restore database to previously created snapshot and latest snapshot +# 5. Scale database +# 6. Add/Remove linked databases + + +- name: perform day2 actions + hosts: localhost + gather_facts: false + collections: + - nutanix.ncp + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + + tasks: + + ############################################ snapshots ########################################### + + - name: create snapshot with minimal spec + ntnx_ndb_database_snapshots: + name: "{{snapshot_name}}1" + time_machine_uuid: "{{time_machine_uuid}}" + register: result + + - name: create snapshot with expiry + ntnx_ndb_database_snapshots: + name: "{{snapshot_name}}2" + time_machine_uuid: "{{time_machine_uuid}}" + expiry_days: 4 + register: result + + - set_fact: + snapshot_uuid: "{{result.snapshot_uuid}}" + + - name: rename snapshot + ntnx_ndb_database_snapshots: + snapshot_uuid: "{{snapshot_uuid}}" + name: "{{snapshot_name}}2-updated" + register: result + + - name: update expiry + ntnx_ndb_database_snapshots: + snapshot_uuid: "{{snapshot_uuid}}" + expiry_days: 5 + register: result + + - name: remove expiry schedule + ntnx_ndb_database_snapshots: + snapshot_uuid: "{{snapshot_uuid}}" + remove_expiry: true + register: result + + - name: Add expiry schedule and rename + ntnx_ndb_database_snapshots: + snapshot_uuid: "{{snapshot_uuid}}" + name: "{{snapshot_name}}2" + expiry_days: 6 + register: result + + + ############################################ log catchup ###################################### + - name: perform log catchup for restore + ntnx_ndb_database_log_catchup: + time_machine_uuid: "{{time_machine_uuid}}" + for_restore: true + register: result + + - name: perform log catchup + ntnx_ndb_database_log_catchup: + time_machine_uuid: "{{time_machine_uuid}}" + for_restore: false + register: result + + ########################################### restore ########################################### + + - name: perform restore using pitr timestamp + ntnx_ndb_database_restore: + db_uuid: "{{db_uuid}}" + pitr_timestamp: "2023-01-02 11:02:22" + timezone: "UTC" + register: result + + - name: perform restore using latest snapshot + ntnx_ndb_database_restore: + db_uuid: "{{db_uuid}}" + snapshot_uuid: "{{snapshot_uuid}}" + register: result + + - name: perform restore using snapshot uuid + ntnx_ndb_database_restore: + db_uuid: "{{db_uuid}}" + snapshot_uuid: "{{snapshot_uuid}}" + register: result + + ########################################### scaling ########################################### + + - name: extend database storage for scaling database + ntnx_ndb_database_scale: + db_uuid: "{{db_uuid}}" + storage_gb: 2 + pre_update_cmd: "ls" + post_update_cmd: "ls -a" + + register: result + + ############################################ add / remove linked databases ########################################### + + - name: add databases in database instance + ntnx_ndb_linked_databases: + db_instance_uuid: "{{db_uuid}}" + databases: + - test1 + - test2 + register: result + + - name: remove databases in database instance + ntnx_ndb_linked_databases: + state: "absent" + db_instance_uuid: "{{db_uuid}}" + database_uuid: "{{linked_databases.test1}}" + register: result
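The playbook above does not exercise clone refresh. Based on the get_clone_refresh_spec() helper added to plugins/module_utils/ndb/database_clones.py later in this diff, ntnx_ndb_database_clone_refresh takes either a snapshot UUID or a PITR timestamp; a hedged sketch (the uuid parameter name for the target clone is an assumption):

```yaml
  - name: refresh database clone to a specific snapshot
    ntnx_ndb_database_clone_refresh:
      uuid: "{{ clone_uuid }}"  # assumed name for the clone identifier
      snapshot_uuid: "{{ snapshot_uuid }}"
      timezone: "UTC"
    register: result
```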
diff --git a/examples/ndb/create_stretched_vlan.yml b/examples/ndb/create_stretched_vlan.yml new file mode 100644 index 000000000..37b162c48 --- /dev/null +++ b/examples/ndb/create_stretched_vlan.yml @@ -0,0 +1,26 @@ +--- +- name: Create stretched vlan + hosts: localhost + gather_facts: false + collections: + - nutanix.ncp + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + + tasks: + + - name: Create stretched vlan + ntnx_ndb_stretched_vlans: + name: "{{st_vlan.name}}" + desc: "{{st_vlan.desc}}" + vlans: + - "" + - "" + register: output + + - debug: + msg: "{{output}}" diff --git a/examples/ndb/create_time_machine_cluster.yml b/examples/ndb/create_time_machine_cluster.yml new file mode 100644 index 000000000..b8dc44fda --- /dev/null +++ b/examples/ndb/create_time_machine_cluster.yml @@ -0,0 +1,27 @@ +--- +- name: NDB time machine's cluster creation + hosts: localhost + gather_facts: false + collections: + - nutanix.ncp + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + + tasks: + + - name: NDB time machine's cluster creation + ntnx_ndb_time_machine_clusters: + time_machine_uuid: "{{time_machine.uuid}}" + cluster: + name: "{{cluster.name}}" + sla: + name: "{{sla.name}}" + type: "{{type}}" + register: output + + - debug: + msg: "{{output}}" diff --git a/examples/ndb/create_vlan.yml b/examples/ndb/create_vlan.yml new file mode 100644 index 000000000..48e967731 --- /dev/null +++ b/examples/ndb/create_vlan.yml @@ -0,0 +1,44 @@ +--- +- name: Create DHCP ndb vlan + hosts: localhost + gather_facts: false + collections: + - nutanix.ncp + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + + tasks: + + - name: Create DHCP ndb vlan + ntnx_ndb_vlans: + name: "{{ndb_vlan.name}}" + vlan_type: DHCP + cluster: + uuid: "{{cluster.uuid}}" + register: output + + - debug: + msg: "{{output}}" + + - name: Create Static ndb vlan + ntnx_ndb_vlans: + name: "{{ndb_vlan.name}}" + vlan_type: Static + gateway: "{{ndb_vlan.gateway}}" + subnet_mask: "{{ndb_vlan.subnet_mask}}" + ip_pools: + - start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" + end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" + - start_ip: "{{ndb_vlan.ip_pools.1.start_ip}}" + end_ip: "{{ndb_vlan.ip_pools.1.end_ip}}" + primary_dns: "{{ndb_vlan.primary_dns}}" + secondary_dns: "{{ndb_vlan.secondary_dns}}" + dns_domain: "{{ndb_vlan.dns_domain}}" + register: output + + - debug: + msg: "{{output}}" diff --git a/examples/ndb/db_server_vms.yml b/examples/ndb/db_server_vms.yml new file mode 100644 index 000000000..c7a86122e --- /dev/null +++ b/examples/ndb/db_server_vms.yml @@ -0,0 +1,320 @@ +--- +- name: NDB db server vms + hosts: localhost + gather_facts: false + collections: + - nutanix.ncp + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + + tasks: + + - name: create spec for db server vm using time machine + check_mode: yes + ntnx_ndb_db_server_vms: + wait: True + name: "ansible-created-vm1-from-time-machine" + desc: "ansible-created-vm1-from-time-machine-desc" + time_machine: + uuid: "test_uuid" + snapshot_uuid: "test_snapshot_uuid" + compute_profile: + uuid: "test_compute_uuid" + network_profile: + uuid: "test_network_uuid" + cluster: + uuid: "test_cluster_uuid" + password: "test_password" + pub_ssh_key: "test_public_key" + database_type: "postgres_database" + automated_patching: + maintenance_window: + uuid: "test_window_uuid" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + + register: check_mode_result + + - name: create spec for db server vm using software profile and profile names + check_mode: yes + ntnx_ndb_db_server_vms: + wait: True
+ name: "{{ vm1_name }}" + desc: "ansible-created-vm1-desc" + software_profile: + name: "{{ software_profile.name }}" + compute_profile: + name: "{{ compute_profile.name }}" + network_profile: + name: "{{ network_profile.name }}" + cluster: + name: "{{ cluster.cluster1.name }}" + password: "{{ vm_password }}" + pub_ssh_key: "{{ public_ssh_key }}" + time_zone: "UTC" + database_type: "postgres_database" + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + register: result + + - name: create db server vm using software profile + ntnx_ndb_db_server_vms: + wait: True + name: "{{ vm1_name }}" + desc: "ansible-created-vm1-desc" + software_profile: + name: "{{ software_profile.name }}" + compute_profile: + name: "{{ compute_profile.name }}" + network_profile: + name: "{{ network_profile.name }}" + cluster: + name: "{{ cluster.cluster1.name }}" + password: "{{ vm_password }}" + pub_ssh_key: "{{ public_ssh_key }}" + time_zone: "UTC" + database_type: "postgres_database" + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + register: result + + + - name: update db server vm name, desc, credentials, tags + ntnx_ndb_db_server_vms: + wait: True + uuid: "{{db_server_uuid}}" + name: "{{vm1_name_updated}}" + desc: "ansible-created-vm1-updated-desc" + reset_name_in_ntnx_cluster: True + reset_desc_in_ntnx_cluster: True + update_credentials: + - username: "{{vm_username}}" + password: "{{vm_password}}" + tags: + ansible-db-server-vms: ansible-updated + register: result + + - name: create spec for update db server vm credentials + check_mode: yes + ntnx_ndb_db_server_vms: + wait: True + uuid: "{{db_server_uuid}}" + update_credentials: + - username: "user" + password: "pass" + register: result + + + - name: List NDB db_servers + ntnx_ndb_db_servers_info: + register: db_servers + + + - name: get NDB db_servers using its name + ntnx_ndb_db_servers_info: + filters: + load_metrics: true + load_databases: True + value_type: name + value: "{{db_servers.response[0].name}}" + register: result + + - name: get NDB db_servers using its ip + ntnx_ndb_db_servers_info: + filters: + value_type: ip + value: "{{db_servers.response[0].ipAddresses[0]}}" + register: result + + - name: get NDB db_servers using its name + ntnx_ndb_db_servers_info: + name: "{{db_servers.response[0].name}}" + register: result + + - name: get NDB db_servers using its id + ntnx_ndb_db_servers_info: + uuid: "{{db_servers.response[0].id}}" + register: result + + - name: get NDB db_servers using ip + ntnx_ndb_db_servers_info: + server_ip: "{{db_servers.response[0].ipAddresses[0]}}" + register: result + + ################################### maintenance tasks update tasks ############################# + + - name: create spec for adding maintenance window tasks to db server vm + check_mode: yes + ntnx_ndb_maintenance_tasks: + db_server_vms: + - name: "{{vm1_name_updated}}" + - uuid: "test_vm_1" + db_server_clusters: + - uuid: "test_cluster_1" + - uuid: "test_cluster_2" + maintenance_window: + name: "{{maintenance.window_name}}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls -a" + post_task_cmd: "ls" + - type: "DB_PATCHING" + pre_task_cmd: "ls -a" + post_task_cmd: "ls" + register: result + +
- name: create spec for removing maintenance window tasks from above created vm + check_mode: yes + ntnx_ndb_maintenance_tasks: + db_server_vms: + - uuid: "{{db_server_uuid}}" + maintenance_window: + uuid: "{{maintenance.window_uuid}}" + tasks: [] + register: result + + + - name: remove maintenance tasks + ntnx_ndb_maintenance_tasks: + db_server_vms: + - uuid: "{{db_server_uuid}}" + maintenance_window: + uuid: "{{maintenance.window_uuid}}" + tasks: [] + register: result + + - name: Add maintenance window task for vm + ntnx_ndb_maintenance_tasks: + db_server_vms: + - name: "{{vm1_name_updated}}" + maintenance_window: + name: "{{maintenance.window_name}}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls -a" + post_task_cmd: "ls" + - type: "DB_PATCHING" + pre_task_cmd: "ls -a" + post_task_cmd: "ls" + register: result + + ################################### DB server VM unregistration tasks ############################# + + - name: generate check mode spec for unregister with default values + check_mode: yes + ntnx_ndb_db_server_vms: + state: "absent" + wait: True + uuid: "{{db_server_uuid}}" + register: result + + - name: generate check mode spec for delete vm with vgs and snapshots + check_mode: yes + ntnx_ndb_db_server_vms: + state: "absent" + uuid: "{{db_server_uuid}}" + delete_from_cluster: True + delete_vgs: True + delete_vm_snapshots: True + register: result + + - name: unregister vm + ntnx_ndb_db_server_vms: + state: "absent" + wait: True + uuid: "{{db_server_uuid}}" + delete_from_cluster: False + soft_remove: True + delete_vgs: True + delete_vm_snapshots: True + register: result + + ################################### DB server VM Registration tasks ############################# + + + - name: generate spec for registration of the previous unregistered vm using check mode + check_mode: yes + ntnx_ndb_register_db_server_vm: + ip: "{{vm_ip}}" + desc: "register-vm-desc" + reset_desc_in_ntnx_cluster: true + cluster: + name: "{{cluster.cluster1.name}}" + postgres: + software_path: "{{postgres.software_home}}" + private_ssh_key: "check-key" + username: "{{vm_username}}" + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + working_directory: "/check" + register: result + + - name: register the previous unregistered vm + ntnx_ndb_register_db_server_vm: + ip: "{{vm_ip}}" + desc: "register-vm-desc" + cluster: + name: "{{cluster.cluster1.name}}" + postgres: + listener_port: 5432 + software_path: "{{postgres.software_home}}" + username: "{{vm_username}}" + password: "{{vm_password}}" + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + + register: result + + ################################### DB server VM Delete tasks ############################# + + + - name: unregister db server vm + ntnx_ndb_db_server_vms: + state: "absent" + wait: True + uuid: "{{db_server_uuid}}" + delete_from_cluster: false + delete_vgs: True + delete_vm_snapshots: True + register: result \ No newline at end of file diff --git a/examples/ndb/ndb_workflow.png b/examples/ndb/ndb_workflow.png new file mode 100644 index 000000000..e4d7e8fa1 Binary files /dev/null and b/examples/ndb/ndb_workflow.png differ
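This release also adds ntnx_ndb_database_clones but no standalone example file for it. Going by the spec builder methods in plugins/module_utils/ndb/database_clones.py later in this diff (name, desc, db_params_profile, time_machine, removal_schedule, refresh_schedule), a clone task might look roughly like the sketch below; field names follow those builders, the db server vm placement fields are omitted, and the exact required schema may differ:

```yaml
  - name: create clone from a time machine snapshot
    ntnx_ndb_database_clones:
      name: "ansible-clone-1"
      desc: "clone created from a snapshot"
      db_params_profile:
        name: "DEFAULT_POSTGRES_PARAMS"
      time_machine:
        name: "POSTGRES_DATABASE_ANSIBLE_TM"
        snapshot_uuid: "{{ snapshot_uuid }}"
        timezone: "UTC"
      removal_schedule:
        days: 2
        timezone: "Asia/Calcutta"
        remind_before_in_days: 1
        delete_database: true
      refresh_schedule:
        days: 2
        time: "12:00:00"
        timezone: "Asia/Calcutta"
    register: result
```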
diff --git a/examples/ndb/provision_postgres_ha_instance_with_ips.yml b/examples/ndb/provision_postgres_ha_instance_with_ips.yml new file mode 100644 index 000000000..94607a85d --- /dev/null +++ b/examples/ndb/provision_postgres_ha_instance_with_ips.yml @@ -0,0 +1,109 @@ +--- +# Here we will be deploying a high availability postgres database with static IPs assigned +# to vms and a virtual IP for HA proxy +- name: Provision HA postgres instance with static IPs + hosts: localhost + gather_facts: false + collections: + - nutanix.ncp + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + + tasks: + - name: create HA instance postgres database with static IP assignments to vms and cluster IP + ntnx_ndb_databases: + wait: true + timeout: 5400 + name: "" + desc: "ansible-created-db-desc" + + db_params_profile: + name: "" + + db_server_cluster: + new_cluster: + name: "" + cluster: + name: "" + ips: + - cluster: + name: "" + ip: "" + + software_profile: + name: "" + network_profile: + name: "" + compute_profile: + name: "" + password: "" + pub_ssh_key: "" + vms: + - name: "vm-1" + node_type: "database" + role: "Primary" + ip: "" + + - name: "vm-2" + node_type: "database" + role: "Secondary" + ip: "" + + - name: "vm-3" + node_type: "database" + role: "Secondary" + ip: "" + + - name: "vm-ha-proxy1" + node_type: "haproxy" + ip: "" + + - name: "vm-ha-proxy2" + node_type: "haproxy" + ip: "" + + postgres: + type: "ha" + db_name: testAnsible + db_password: "" + db_size: 200 + patroni_cluster_name: "" + ha_proxy: + provision_virtual_ip: true + + time_machine: + name: TM2 + desc: TM-desc + sla: + name: "" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + clusters: + - name: "" + tags: + ansible-databases: "ha-instance-dbs" + + automated_patching: + maintenance_window: + name: "" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + + register: result + + - debug: + msg: "{{result}}" diff --git a/examples/ndb/registr_cluster.yml b/examples/ndb/registr_cluster.yml new file mode 100644 index 000000000..589953308 --- /dev/null +++ b/examples/ndb/registr_cluster.yml @@ -0,0 +1,45 @@ +--- +- name: NDB cluster registration + hosts: localhost + gather_facts: false + collections: + - nutanix.ncp + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + + tasks: + + - name: NDB cluster registration + ntnx_ndb_clusters: + name: "{{cluster.name}}" + desc: "{{cluster.desc}}" + name_prefix: "{{cluster.name_prefix}}" + cluster_ip: "{{cluster.cluster_ip}}" + cluster_credentials: + username: "{{cluster_credentials.username}}" + password: "{{cluster_credentials.password}}" + agent_network: + dns_servers: + - "{{agent_network.dns_servers[0]}}" + - "{{agent_network.dns_servers[1]}}" + ntp_servers: + - "{{agent_network.ntp_servers[0]}}" + - "{{agent_network.ntp_servers[1]}}" + - "{{agent_network.ntp_servers[2]}}" + - "{{agent_network.ntp_servers[3]}}" + vlan_access: + prism_vlan: + vlan_name: "{{prism_vlan.vlan_name}}" + vlan_type: "{{prism_vlan.vlan_type}}" + static_ip: "{{prism_vlan.static_ip}}" + gateway: "{{prism_vlan.gateway}}" + subnet_mask: "{{prism_vlan.subnet_mask}}" + storage_container: "{{storage_container.name}}" + register: output + + - debug: + msg: "{{output}}" diff --git a/examples/ndb/software_profiles.yml b/examples/ndb/software_profiles.yml
new file mode 100644 index 000000000..b733a93b2 --- /dev/null +++ b/examples/ndb/software_profiles.yml @@ -0,0 +1,187 @@ +--- +# Summary: +# This playbook will perform below cases: +# 1. Creation of software profile +# 2. Update software profile +# 3. Create, update and delete version +# 4. Publish, unpublish and deprecate profile +# 5. Replicate profiles to multi clusters +# 6. Delete of profile +- name: Create software profiles + hosts: localhost + gather_facts: false + collections: + - nutanix.ncp + module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + + tasks: + - name: create software profile create spec + check_mode: yes + ntnx_ndb_profiles: + name: "{{profile1_name}}" + desc: "{{profile1_name}}-desc" + type: "software" + database_type: "postgres" + software: + topology: "cluster" + name: "v1.0" + desc: "v1.0-desc" + notes: + os: "os_notes" + db_software: "db_notes" + db_server_vm: + name: "{{db_server_vm.name}}" + clusters: + - name: "" + - uuid: "" + register: result + + - name: create software profile with base version and cluster instance topology. Replicate to multiple clusters + ntnx_ndb_profiles: + name: "{{profile1_name}}-replicated" + desc: "{{profile1_name}}-desc-replicated" + type: "software" + database_type: "postgres" + software: + topology: "cluster" + name: "v1.0" + desc: "v1.0-desc" + notes: + os: "os_notes" + db_software: "db_notes" + db_server_vm: + uuid: "{{db_server_vm.uuid}}" + clusters: + - name: "" + - uuid: "" + register: result + + - name: create software profile with base version and single instance topology + ntnx_ndb_profiles: + name: "{{profile2_name}}" + desc: "{{profile2_name}}-desc" + type: "software" + database_type: "postgres" + software: + topology: "single" + name: "v1.0" + desc: "v1.0-desc" + notes: + os: "os_notes" + db_software: "db_notes" + db_server_vm: + uuid: "{{db_server_vm.uuid}}" + clusters: + - name: "" + register: result + + - name: update software profile + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + name: "{{profile1_name}}-updated1" + desc: "{{profile1_name}}-desc-updated" + register: result + + - name: create software profile version spec + check_mode: yes + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + database_type: "postgres" + software: + name: "v2.0" + desc: "v2.0-desc" + notes: + os: "os_notes for v2" + db_software: "db_notes for v2" + db_server_vm: + name: "{{db_server_vm.name}}" + + register: result + + - name: create software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + database_type: "postgres" + software: + name: "v2.0" + desc: "v2.0-desc" + notes: + os: "os_notes for v2" + db_software: "db_notes for v2" + db_server_vm: + uuid: "{{db_server_vm.uuid}}" + + register: result + + - name: create spec for update software profile version + check_mode: yes + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + database_type: "postgres" + software: + version_uuid: "{{result.version_uuid}}" + name: "v2.0-updated" + desc: "v2.0-desc-updated" + + register: result + + - name: update software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + database_type: "postgres" + software: + version_uuid: "{{result.version_uuid}}" + name: "v2.0-updated" + desc: "v2.0-desc-updated" + + register: result + + - name: publish software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + software: + version_uuid: "{{version_uuid}}" + publish: True + register: result + + - name: 
unpublish software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + software: + version_uuid: "{{version_uuid}}" + publish: false + register: result + + - name: deprecate software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + software: + version_uuid: "{{version_uuid}}" + deprecate: True + register: result + + - name: delete software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + software: + version_uuid: "{{version_uuid}}" + state: "absent" + register: result + + - name: replicate software profile + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + clusters: + - name: "{{cluster.cluster2.name}}" + register: result + + - name: delete software profile + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + state: "absent" + register: result diff --git a/galaxy.yml b/galaxy.yml index 3fec09102..70d59ebe3 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: "nutanix" name: "ncp" -version: "1.8.0-beta.1" +version: "1.8.0" readme: "README.md" authors: - "Abhishek Chaudhary (@abhimutant)" diff --git a/meta/runtime.yml b/meta/runtime.yml index b83f94afa..4ab2374cd 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -70,4 +70,28 @@ action_groups: - ntnx_ndb_db_servers_info - ntnx_ndb_slas_info - ntnx_ndb_databases + - ntnx_ndb_register_database + - ntnx_ndb_db_server_vms - ntnx_ndb_clusters_info + - ntnx_ndb_clusters + - ntnx_ndb_snapshots_info + - ntnx_ndb_vlans + - ntnx_ndb_vlans_info + - ntnx_ndb_stretched_vlans + - ntnx_ndb_time_machine_clusters + - ntnx_ndb_tags + - ntnx_ndb_database_clones + - ntnx_ndb_database_snapshots + - ntnx_ndb_database_clone_refresh + - ntnx_ndb_authorize_db_server_vms + - ntnx_ndb_profiles + - ntnx_ndb_database_log_catchup + - ntnx_ndb_database_restore + - ntnx_ndb_database_scale + - ntnx_ndb_linked_databases + - ntnx_ndb_replicate_database_snapshots + - ntnx_ndb_register_db_server_vm + - ntnx_ndb_maintenance_tasks + - ntnx_ndb_maintenance_window + - ntnx_ndb_maintenance_windows_info + - ntnx_ndb_slas diff --git a/plugins/doc_fragments/ntnx_ndb_base_module.py b/plugins/doc_fragments/ntnx_ndb_base_module.py index a52e798f7..f779c97e3 100644 --- a/plugins/doc_fragments/ntnx_ndb_base_module.py +++ b/plugins/doc_fragments/ntnx_ndb_base_module.py @@ -38,4 +38,10 @@ class ModuleDocFragment(object): - C(validate_certs). If not set then the value of the C(VALIDATE_CERTS), environment variable is used. type: bool default: true + timeout: + description: + - timeout for polling of operation, after which module will error out + type: int + required: false + default: 2100 """ diff --git a/plugins/doc_fragments/ntnx_ndb_info_base_module.py b/plugins/doc_fragments/ntnx_ndb_info_base_module.py new file mode 100644 index 000000000..db14fc278 --- /dev/null +++ b/plugins/doc_fragments/ntnx_ndb_info_base_module.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Plugin options for ntnx ndb info + DOCUMENTATION = r""" +options: + nutanix_host: + description: + - ndb era server IP address + - C(nutanix_host). If not set then the value of the C(NUTANIX_HOST), environment variable is used. + type: str + required: true + nutanix_password: + description: + - ndb era server password + - C(nutanix_password). 
If not set then the value of the C(NUTANIX_PASSWORD), environment variable is used. + type: str + required: true + nutanix_username: + description: + - ndb era server username + - C(nutanix_username). If not set then the value of the C(NUTANIX_USERNAME), environment variable is used. + type: str + required: true + validate_certs: + description: + - Set value to C(False) to skip validation for self signed certificates + - This is not recommended for production setup + - C(validate_certs). If not set then the value of the C(VALIDATE_CERTS), environment variable is used. + type: bool + default: true +""" diff --git a/plugins/module_utils/constants.py b/plugins/module_utils/constants.py index a487b64fc..09b0d3627 100644 --- a/plugins/module_utils/constants.py +++ b/plugins/module_utils/constants.py @@ -284,8 +284,25 @@ class NDB: OPERATIONS_POLLING_DELAY = 30 class DatabaseTypes: - POSTGRES = "postgres_database" + POSTGRES = "postgres" + ALL = [ + "oracle", + "postgres", + "sqlserver", + "mariadb", + "mysql", + "saphana", + "mongodb", + ] + + class ProfileTypes: + COMPUTE = "Compute" + NETWORK = "Network" + DB_PARAMS = "Database_Parameter" + SOFTWARE = "Software" + ALL = ["compute", "network", "database_parameter", "software"] class StatusCodes: SUCCESS = "5" FAILURE = "4" + COMPLETED_WITH_WARNING = "17" diff --git a/plugins/module_utils/entity.py b/plugins/module_utils/entity.py index aad30c40d..a46672f34 100644 --- a/plugins/module_utils/entity.py +++ b/plugins/module_utils/entity.py @@ -211,7 +211,7 @@ def list( return resp # "params" can be used to override module.params to create spec by other modules backened - def get_spec(self, old_spec=None, params=None): + def get_spec(self, old_spec=None, params=None, **kwargs): spec = copy.deepcopy(old_spec) or self._get_default_spec() ansible_params = None diff --git a/plugins/module_utils/ndb/clones.py b/plugins/module_utils/ndb/clones.py deleted file mode 100644 index dead5a246..000000000 --- a/plugins/module_utils/ndb/clones.py +++ /dev/null @@ -1,28 +0,0 @@ -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -from .nutanix_database import NutanixDatabase - - -class Clone(NutanixDatabase): - def __init__(self, module): - resource_type = "/clones" - super(Clone, self).__init__(module, resource_type=resource_type) - - def get_clone(self, uuid=None, name=None): - if uuid: - resp = self.read(uuid=uuid) - elif name: - query = {"value-type": "name", "value": name} - resp = self.read(query=query) - if not resp: - return None, "Clone with name {0} not found".format(name) - resp = resp[0] - else: - return None, "Please provide either uuid or name for fetching clone details" - - return resp, None diff --git a/plugins/module_utils/ndb/clusters.py b/plugins/module_utils/ndb/clusters.py index 53d9948de..5f8bd5642 100644 --- a/plugins/module_utils/ndb/clusters.py +++ b/plugins/module_utils/ndb/clusters.py @@ -4,6 +4,7 @@ __metaclass__ = type +from copy import deepcopy from .nutanix_database import NutanixDatabase @@ -12,6 +13,60 @@ class Cluster(NutanixDatabase): def __init__(self, module): resource_type = "/clusters" super(Cluster, self).__init__(module, resource_type=resource_type) + self.build_spec_methods = { + "name": self._build_spec_name, + "desc": self._build_spec_desc, + "name_prefix": self._build_spec_name_prefix, + "cluster_ip": self._build_spec_cluster_ip, + "cluster_credentials": 
self._build_spec_cluster_credentials, + "agent_network": self._build_spec_agent_network, + "vlan_access": self._build_spec_vlan_access, + "storage_container": self._build_spec_storage_container, + } + + def update( + self, + data=None, + uuid=None, + endpoint=None, + query=None, + raise_error=True, + no_response=False, + timeout=30, + method="PATCH", + ): + return super().update( + data, uuid, endpoint, query, raise_error, no_response, timeout, method + ) + + def delete( + self, + uuid=None, + endpoint=None, + query=None, + raise_error=True, + no_response=False, + timeout=30, + data=None, + ): + if not self._validate_cluster_deleting(uuid): + err = "NDB is unable to remove the Nutanix Cluster at this time. Check dependencies" + return None, err + if not data: + data = {"deleteRemoteSites": False} + return ( + super().delete( + uuid, endpoint, query, raise_error, no_response, timeout, data + ), + None, + ) + + def get_cluster_by_ip(self, cluster_ip): + clusters = self.read() + for cluster in clusters: + if cluster_ip in cluster["ipAddresses"]: + return cluster + return None def get_uuid( self, @@ -28,13 +83,13 @@ def get_uuid( def get_cluster(self, uuid=None, name=None): if uuid: - resp = self.read(uuid=uuid) + resp = self.read(uuid=uuid, raise_error=False) elif name: endpoint = "{0}/{1}".format("name", name) - resp = self.read(endpoint=endpoint) + resp = self.read(endpoint=endpoint, raise_error=False) # we fetch cluster using ID again to get complete info. - if resp and resp.get("id"): + if resp and not resp.get("errorCode") and resp.get("id"): resp = self.read(uuid=resp["id"]) else: @@ -43,8 +98,180 @@ def get_cluster(self, uuid=None, name=None): "Please provide either uuid or name for fetching cluster details", ) + if isinstance(resp, dict) and resp.get("errorCode"): + self.module.fail_json( + msg="Failed fetching cluster info", + error=resp.get("message"), + response=resp, + ) + return resp, None + def _get_default_spec(self): + return deepcopy( + { + "clusterName": "", + "clusterIP": "", + "storageContainer": "", + "agentVMPrefix": "", + "port": 9440, + "protocol": "https", + "clusterType": "NTNX", + "version": "v2", + "credentialsInfo": [], + "agentNetworkInfo": [], + "networksInfo": [], + } + ) + + def get_default_update_spec(self, override_spec=None): + spec = deepcopy( + { + "name": "", + "description": "", + "ipAddresses": [], + } + ) + if override_spec: + for key in spec.keys(): + if override_spec.get(key): + spec[key] = deepcopy(override_spec[key]) + + return spec + + def _build_spec_name(self, payload, name): + if self.module.params.get("uuid"): + payload["name"] = name + else: + payload["clusterName"] = name + return payload, None + + def _build_spec_name_prefix(self, payload, prefix): + payload["agentVMPrefix"] = prefix + return payload, None + + def _build_spec_desc(self, payload, desc): + if self.module.params.get("uuid"): + payload["description"] = desc + else: + payload["clusterDescription"] = desc + return payload, None + + def _build_spec_cluster_ip(self, payload, cluster_ip): + if self.module.params.get("uuid"): + payload["ipAddresses"] = [cluster_ip] + else: + payload["clusterIP"] = cluster_ip + return payload, None + + def _build_spec_cluster_credentials(self, payload, credentials): + if self.module.params.get("uuid"): + payload["username"] = credentials["username"] + payload["password"] = credentials["password"] + else: + payload["credentialsInfo"] = [ + {"name": "username", "value": credentials["username"]}, + {"name": "password", "value": credentials["password"]}, 
+ ] + return payload, None + + def _build_spec_agent_network(self, payload, agent_network): + payload["agentNetworkInfo"] = [ + {"name": "dns", "value": ",".join(agent_network["dns_servers"])}, + {"name": "ntp", "value": ",".join(agent_network["ntp_servers"])}, + ] + return payload, None + + def _build_spec_vlan_access(self, payload, vlans_config): + networks_info = [] + prism_vlan = self._generate_vlan_access_spec(vlans_config["prism_vlan"]) + prism_vlan["accessType"] = ["PRISM"] + + if vlans_config.get("dsip_vlan"): + dsip_vlan = self._generate_vlan_access_spec(vlans_config["dsip_vlan"]) + dsip_vlan["accessType"] = ["DSIP"] + networks_info.append(dsip_vlan) + else: + prism_vlan["accessType"].append("DSIP") + + if vlans_config.get("dbserver_vlan"): + dbserver_vlan = self._generate_vlan_access_spec( + vlans_config["dbserver_vlan"] + ) + dbserver_vlan["accessType"] = ["DBSERVER"] + networks_info.append(dbserver_vlan) + else: + prism_vlan["accessType"].append("DBSERVER") + networks_info.append(prism_vlan) + + payload["networksInfo"] = networks_info + return payload, None + + @staticmethod + def _generate_vlan_access_spec(vlan): + vlan_spec = { + "type": vlan["vlan_type"], + "networkInfo": [ + { + "name": "vlanName", + "value": vlan["vlan_name"], + }, + { + "name": "staticIP", + "value": vlan["static_ip"], + }, + { + "name": "gateway", + "value": vlan["gateway"], + }, + { + "name": "subnetMask", + "value": vlan["subnet_mask"], + }, + ], + } + return vlan_spec + + def _build_spec_storage_container(self, payload, storage_container): + payload["storageContainer"] = storage_container + return payload, None + + def _validate_cluster_deleting(self, cluster_uuid): + query = {"count_entities": True} + cluster = self.read(cluster_uuid, query=query) + + if cluster.get("entityCounts"): + if cluster["entityCounts"].get("dbServers") != 0: + return False + elif cluster["entityCounts"].get("engineCounts"): + for engine in cluster["entityCounts"]["engineCounts"].values(): + if engine["timeMachines"] != 0: + return False + return True + + def get_all_clusters_uuid_name_map(self): + resp = self.read() + uuid_name_map = {} + if not resp: + return uuid_name_map + + for cluster in resp: + uuid_name_map[cluster.get("id")] = cluster.get("name") + + return uuid_name_map + + def get_all_clusters_name_uuid_map(self): + resp = self.read() + name_uuid_map = {} + if not resp: + return name_uuid_map + + for cluster in resp: + if cluster.get("name"): + name_uuid_map[cluster.get("name")] = cluster.get("id") + + return name_uuid_map + # helper functions diff --git a/plugins/module_utils/ndb/database_clones.py b/plugins/module_utils/ndb/database_clones.py new file mode 100644 index 000000000..721bfddd0 --- /dev/null +++ b/plugins/module_utils/ndb/database_clones.py @@ -0,0 +1,334 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from copy import deepcopy + +from .database_engines.db_engine_factory import create_db_engine +from .db_server_vm import DBServerVM +from .nutanix_database import NutanixDatabase +from .profiles.profile_types import DatabaseParameterProfile +from .time_machines import TimeMachine + + +class DatabaseClone(NutanixDatabase): + resource_type = "/clones" + + def __init__(self, module): + + super(DatabaseClone, self).__init__(module, self.resource_type) + self.build_spec_methods = { + "name": self._build_spec_name, + "desc": 
self._build_spec_desc, + "db_params_profile": self._build_spec_db_params_profile, + "time_machine": self._build_spec_time_machine, + "removal_schedule": self._build_spec_removal_schedule, + "refresh_schedule": self._build_spec_refresh_schedule, + } + + def create(self, time_machine_uuid, data): + time_machine = TimeMachine(self.module) + endpoint = "{0}/clones".format(time_machine_uuid) + return time_machine.create(data=data, endpoint=endpoint) + + def refresh(self, uuid, data): + endpoint = "refresh" + return self.update(data=data, uuid=uuid, endpoint=endpoint, method="POST") + + def update( + self, + data=None, + uuid=None, + endpoint=None, + query=None, + raise_error=True, + no_response=False, + timeout=30, + method="PATCH", + ): + return super().update( + data, + uuid, + endpoint, + query, + raise_error, + no_response, + timeout, + method=method, + ) + + def format_response(self, response): + """This method formats the response. It removes attributes as per requirement.""" + attrs = [ + "accessLevel", + "category", + "placeholder", + "internal", + "databaseClusterType", + "ownerId", + "databaseStatus", + "dbserverlogicalCluster", + "databaseGroupStateInfo", + ] + for attr in attrs: + if attr in response: + response.pop(attr) + if response.get("metadata") is not None: + response["provisionOperationId"] = response.get("metadata", {}).get( + "provisionOperationId" + ) + response["sourceSnapshotId"] = response.get("metadata", {}).get( + "sourceSnapshotId" + ) + response["lastRefreshTimestamp"] = response.get("metadata", {}).get( + "lastRefreshTimestamp" + ) + response.pop("metadata") + + for node in response.get("databaseNodes", []): + DBServerVM.format_response(node) + + return response + + def _get_default_spec(self): + return deepcopy( + { + "name": "", + "description": "", + "clustered": False, + "nxClusterId": "", + "sshPublicKey": "", + "timeMachineId": "", + "snapshotId": None, + "userPitrTimestamp": None, + "timeZone": "", + "latestSnapshot": False, + "actionArguments": [], + "lcmConfig": {"databaseLCMConfig": {}}, + "databaseParameterProfileId": "", + } + ) + + def get_default_update_spec(self, override_spec=None): + spec = deepcopy( + { + "name": None, + "description": None, + "tags": [], + "resetTags": True, + "resetName": True, + "resetDescription": True, + "lcmConfig": {}, + "resetLcmConfig": False, + } + ) + if override_spec: + for key in spec.keys(): + if override_spec.get(key): + spec[key] = deepcopy(override_spec[key]) + + return spec + + def get_default_delete_spec(self): + return deepcopy({"remove": False, "delete": False, "softRemove": False}) + + def get_clone(self, uuid=None, name=None): + if uuid: + resp = self.read(uuid=uuid) + elif name: + query = {"value-type": "name", "value": name} + resp = self.read(query=query) + if not resp: + return None, "Clone with name {0} not found".format(name) + resp = resp[0] + else: + return None, "Please provide either uuid or name for fetching clone details" + + return resp, None + + def get_spec(self, old_spec=None, params=None, **kwargs): + if kwargs.get("create"): + return super().get_spec(old_spec=old_spec, params=params, **kwargs) + elif kwargs.get("update"): + return self.get_update_spec(old_spec=old_spec, params=params, **kwargs) + elif kwargs.get("delete"): + return self.get_delete_spec(old_spec=old_spec, params=params, **kwargs) + return None, "Please provide supported arguments" + + def get_db_engine_spec(self, payload, params=None, **kwargs): + + db_engine, err = create_db_engine(self.module, db_architecture="single") + 
if err: + return None, err + + db_type = db_engine.get_type() + + config = self.module.params.get(db_type) or params + + payload, err = db_engine.build_spec_db_clone_action_arguments(payload, config) + if err: + return None, err + + return payload, err + + def get_update_spec(self, old_spec=None, params=None, **kwargs): + self.build_spec_methods = { + "removal_schedule": self._build_spec_removal_schedule_update, + "refresh_schedule": self._build_spec_refresh_schedule_update, + "name": self._build_spec_name, + "desc": self._build_spec_desc, + } + + return super().get_spec(old_spec=old_spec, params=params, **kwargs) + + def get_delete_spec(self, old_spec=None, params=None, **kwargs): + payload = deepcopy(old_spec) + if self.module.params.get("delete_from_vm"): + payload["delete"] = True + elif self.module.params.get("soft_remove"): + payload["softRemove"] = True + else: + payload["remove"] = True + + return payload, None + + def get_clone_refresh_spec(self): + payload = {} + if self.module.params.get("snapshot_uuid"): + payload["snapshotId"] = self.module.params.get("snapshot_uuid") + elif self.module.params.get("pitr_timestamp"): + payload["userPitrTimestamp"] = self.module.params.get("pitr_timestamp") + else: + return ( + None, + "snapshot_uuid or pitr_timestamp is required for database clone refresh", + ) + + payload["timeZone"] = self.module.params.get("timezone") + return payload, None + + def _build_spec_desc(self, payload, desc): + payload["description"] = desc + return payload, None + + def _build_spec_name(self, payload, name): + payload["name"] = name + return payload, None + + def _build_spec_db_params_profile(self, payload, db_params_profile): + db_params = DatabaseParameterProfile(self.module) + uuid, err = db_params.get_profile_uuid(db_params_profile) + if err: + return None, err + + payload["databaseParameterProfileId"] = uuid + return payload, None + + def _build_spec_time_machine(self, payload, time_machine): + _time_machine = TimeMachine(self.module) + uuid, err = _time_machine.get_time_machine_uuid(time_machine) + if err: + return None, err + payload["timeMachineId"] = uuid + + if time_machine.get("snapshot_uuid"): + payload["snapshotId"] = time_machine.get("snapshot_uuid") + elif time_machine.get("pitr_timestamp"): + payload["userPitrTimestamp"] = time_machine.get("pitr_timestamp") + else: + return ( + None, + "Required snapshot_uuid or pitr_timestamp for source of db clone", + ) + + payload["timeZone"] = time_machine.get("timezone") + return payload, None + + def _build_spec_removal_schedule(self, payload, removal_schedule): + expiry_details = payload.get("lcmConfig", {}).get("expiryDetails", {}) + if not expiry_details: + expiry_details = {} + + # map of display name to api field names + args = { + "days": "expireInDays", + "timezone": "expiryDateTimezone", + "delete_database": "deleteDatabase", + "timestamp": "expiryTimestamp", + "remind_before_in_days": "remindBeforeInDays", + } + for key, val in args.items(): + if removal_schedule.get(key) is not None: + expiry_details[val] = removal_schedule.get(key) + + if not payload["lcmConfig"].get("databaseLCMConfig"): + payload["lcmConfig"]["databaseLCMConfig"] = { + "expiryDetails": expiry_details + } + else: + payload["lcmConfig"]["databaseLCMConfig"]["expiryDetails"] = expiry_details + + return payload, None + + def _build_spec_refresh_schedule(self, payload, refresh_schedule): + refresh_details = payload.get("lcmConfig", {}).get("refreshDetails", {}) + if not refresh_details: + refresh_details = {} + + # map of display 
name to api field names + args = { + "days": "refreshInDays", + "timezone": "refreshDateTimezone", + "time": "refreshTime", + } + + for key, val in args.items(): + if refresh_schedule.get(key): + refresh_details[val] = refresh_schedule.get(key) + + if not payload["lcmConfig"].get("databaseLCMConfig"): + payload["lcmConfig"]["databaseLCMConfig"] = { + "refreshDetails": refresh_details + } + else: + payload["lcmConfig"]["databaseLCMConfig"][ + "refreshDetails" + ] = refresh_details + + return payload, None + + def _build_spec_removal_schedule_update(self, payload, removal_schedule): + if removal_schedule.get("state", "present") == "absent": + payload["removeExpiryConfig"] = True + else: + + payload, err = self._build_spec_removal_schedule(payload, removal_schedule) + if err: + return None, err + payload["lcmConfig"]["expiryDetails"] = payload["lcmConfig"][ + "databaseLCMConfig" + ]["expiryDetails"] + payload["lcmConfig"].pop("databaseLCMConfig") + + # an explicit expiry timestamp takes precedence, so expireInDays is reset to 0 + if removal_schedule.get("timestamp"): + payload["lcmConfig"]["expiryDetails"]["expireInDays"] = 0 + payload["resetLcmConfig"] = True + return payload, None + + def _build_spec_refresh_schedule_update(self, payload, refresh_schedule): + if refresh_schedule.get("state", "present") == "absent": + payload["removeRefreshConfig"] = True + else: + payload, err = self._build_spec_refresh_schedule(payload, refresh_schedule) + if err: + return None, err + + payload["lcmConfig"]["refreshDetails"] = payload["lcmConfig"][ + "databaseLCMConfig" + ]["refreshDetails"] + payload["lcmConfig"].pop("databaseLCMConfig") + payload["resetLcmConfig"] = True + return payload, None diff --git a/plugins/module_utils/ndb/database_engines/database_engine.py b/plugins/module_utils/ndb/database_engines/database_engine.py new file mode 100644 index 000000000..aade70764 --- /dev/null +++ b/plugins/module_utils/ndb/database_engines/database_engine.py @@ -0,0 +1,55 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class DatabaseEngine: + """ + Base class to be implemented by all database engine types + """ + + _type = "" + + def __init__(self, module): + self.module = module + + def build_spec_db_instance_provision_action_arguments(self, payload, config): + """ + Implement this method to add db engine specific action arguments for database instance provision + """ + return payload, None + + def build_spec_db_instance_register_action_arguments(self, payload, config): + """ + Implement this method to add db engine specific action arguments for database instance registration + """ + return payload, None + + def build_spec_db_server_vm_register_action_arguments(self, payload, config): + """ + Implement this method to add database engine specific properties for registering a database server vm + """ + return payload, None + + def build_spec_db_clone_action_arguments(self, payload, config): + """ + Implement this method to add database engine specific properties for database clone provisioning + """ + return payload, None + + def build_spec_create_db_params_profile_properties(self, payload, config): + """ + Implement this method to add database engine specific properties in database parameter profile + """ + return payload, None + + def build_spec_update_db_params_profile_version(self, payload, config): + """ + Implement this method to update database engine specific properties in 
database parameter profile + """ + return payload, None + + def get_type(self): + return self._type diff --git a/plugins/module_utils/ndb/database_engines/db_engine_factory.py b/plugins/module_utils/ndb/database_engines/db_engine_factory.py new file mode 100644 index 000000000..1467b42ef --- /dev/null +++ b/plugins/module_utils/ndb/database_engines/db_engine_factory.py @@ -0,0 +1,56 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +from ...constants import NDB +from ..database_engines.postgres import ( + Postgres, + PostgresHAInstance, + PostgresSingleInstance, +) + + +def get_engine_type(module): + engine_types = NDB.DatabaseTypes.ALL + + for type in engine_types: + if type in module.params: + return type, None + + return None, "Input doesn't contain config for any allowed database engine type" + + +def create_db_engine(module, engine_type=None, db_architecture=None): + engines = { + "postgres": { + "single": PostgresSingleInstance, + "ha": PostgresHAInstance, + "default": Postgres, + }, + # for profile version update + "postgres_database": { + "single": PostgresSingleInstance, + "ha": PostgresHAInstance, + "default": Postgres, + }, + } + + if not engine_type: + engine_type, err = get_engine_type(module) + if err: + return None, err + + if engine_type in engines: + if ( + db_architecture + and isinstance(engines[engine_type], dict) + and db_architecture in engines[engine_type] + ): + return engines[engine_type][db_architecture](module), None + else: + return engines[engine_type]["default"](module), None + else: + return None, "Invalid database engine type: {0}".format(engine_type) diff --git a/plugins/module_utils/ndb/database_engines/postgres.py b/plugins/module_utils/ndb/database_engines/postgres.py new file mode 100644 index 000000000..223cad085 --- /dev/null +++ b/plugins/module_utils/ndb/database_engines/postgres.py @@ -0,0 +1,258 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +from ...constants import NDB +from .database_engine import DatabaseEngine + + +class Postgres(DatabaseEngine): + def __init__(self, module): + self._type = NDB.DatabaseTypes.POSTGRES + super(Postgres, self).__init__(module) + + def build_spec_create_db_params_profile_properties(self, payload, db_params): + properties = payload.get("properties", []) + + # map of properties with defaults + default_properties = { + "max_connections": 100, + "max_replication_slots": 10, + "effective_io_concurrency": 1, + "timezone": "UTC", + "max_prepared_transactions": 0, + "max_locks_per_transaction": 64, + "max_wal_senders": 10, + "max_worker_processes": 8, + "checkpoint_completion_target": 0.5, + "autovacuum": "on", + "autovacuum_freeze_max_age": 200000000, + "autovacuum_vacuum_threshold": 50, + "autovacuum_vacuum_scale_factor": 0.2, + "autovacuum_work_mem": -1, + "autovacuum_max_workers": 3, + "wal_buffers": -1, + "synchronous_commit": "on", + "random_page_cost": 4, + "wal_keep_segments": 700, + "min_wal_size": 80, + "max_wal_size": 1, + "checkpoint_timeout": 5, + "autovacuum_vacuum_cost_delay": 2, + } + + # map of certain properties with their units + property_units = { + "min_wal_size": "MB", + "max_wal_size": "GB", + "checkpoint_timeout": "min", + 
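# these unit suffixes are appended to the raw values below, e.g. a min_wal_size of 80 is sent as "80MB" + 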
"autovacuum_vacuum_cost_delay": "ms", + } + + # create new properties spec + for name, val in default_properties.items(): + + # check if property value is given in input + if name in db_params: + val = str(db_params.get(name)) + + val = str(val) + property_units.get(name, "") + + spec = {"name": name, "value": val} + properties.append(spec) + + payload["properties"] = properties + return payload, None + + def build_spec_update_db_params_profile_version(self, payload, db_params): + + # map of certain properties with their units + property_units = { + "min_wal_size": "MB", + "max_wal_size": "GB", + "checkpoint_timeout": "min", + "autovacuum_vacuum_cost_delay": "ms", + } + + properties = payload.get("properties", []) + + # update properties + for prop in properties: + + # check if property value is given in input + if prop["name"] in db_params: + val = str(db_params.get(prop["name"])) + val = val + property_units.get(prop["name"], "") + prop["value"] = val + + payload["properties"] = properties + return payload, None + + +class PostgresSingleInstance(Postgres): + def __init__(self, module): + super(PostgresSingleInstance, self).__init__(module) + + def build_spec_db_instance_provision_action_arguments(self, payload, config): + + action_arguments = payload.get("actionArguments", []) + # fields to their defaults maps + args = { + "listener_port": "", + "auto_tune_staging_drive": False, + "allocate_pg_hugepage": False, + "cluster_database": False, + "auth_method": "", + "db_password": "", + "pre_create_script": "", + "post_create_script": "", + } + # create action arguments + for key, value in args.items(): + spec = {"name": key, "value": config.get(key, value)} + action_arguments.append(spec) + + # handle scenariors where display names are diff + action_arguments.append( + {"name": "database_names", "value": config.get("db_name")} + ) + action_arguments.append( + {"name": "database_size", "value": str(config.get("db_size"))} + ) + + payload["actionArguments"] = action_arguments + return payload, None + + def build_spec_db_instance_register_action_arguments(self, payload, config): + action_arguments = payload.get("actionArguments", []) + + # List of action arguments for postgres registration + args = [ + "listener_port", + "db_name", + "db_user", + "db_password", + ] + + # create action arguments + for arg in args: + if arg in config: + spec = {"name": arg, "value": config[arg]} + action_arguments.append(spec) + + if config.get("software_path"): + action_arguments.append( + {"name": "postgres_software_home", "value": config["software_path"]} + ) + + payload["actionArguments"] = action_arguments + return payload, None + + def build_spec_db_server_vm_register_action_arguments(self, payload, config): + action_arguments = payload.get("actionArguments", []) + + action_arguments.append( + {"name": "postgres_software_home", "value": config.get("software_path", "")} + ) + + action_arguments.append( + {"name": "listener_port", "value": config.get("listener_port", "")} + ) + + payload["actionArguments"] = action_arguments + return payload, None + + def build_spec_db_clone_action_arguments(self, payload, config): + action_arguments = payload.get("actionArguments", []) + # fields to their defaults maps + args = { + "db_password": "", + "pre_clone_cmd": "", + "post_clone_cmd": "", + } + + # create action arguments + for key, value in args.items(): + spec = {"name": key, "value": config.get(key, value)} + action_arguments.append(spec) + + payload["actionArguments"] = action_arguments + return payload, None + 
+ +class PostgresHAInstance(Postgres): + def __init__(self, module): + super(PostgresHAInstance, self).__init__(module) + + def build_spec_db_instance_provision_action_arguments(self, payload, config): + + action_arguments = payload.get("actionArguments", []) + # Below is a map of common action arguments fields to their default value + args = { + "listener_port": "", + "allocate_pg_hugepage": False, + "cluster_database": False, + "db_password": "", + "pre_create_script": "", + "post_create_script": "", + "patroni_cluster_name": "", + "archive_wal_expire_days": "", + "enable_synchronous_mode": False, + "enable_peer_auth": False, + "node_type": "database", + "backup_policy": "primary_only", + "failover_mode": "Automatic", + } + + # create action arguments + for key, default in args.items(): + spec = {"name": key, "value": config.get(key, default)} + action_arguments.append(spec) + + # handle scenarios where display names are different + action_arguments.append( + {"name": "database_names", "value": config.get("db_name")} + ) + action_arguments.append( + {"name": "database_size", "value": str(config.get("db_size"))} + ) + + # for HA instance, add HA proxy related action arguments and vm details if required + ha_proxy = config.get("ha_proxy") + if ha_proxy: + action_arguments, err = self._build_spec_ha_proxy_action_arguments( + ha_proxy, action_arguments=action_arguments + ) + if err: + return None, err + + payload["actionArguments"] = action_arguments + return payload, None + + def _build_spec_ha_proxy_action_arguments(self, ha_proxy, action_arguments=None): + + if not action_arguments: + action_arguments = [] + + action_arguments.append( + { + "name": "provision_virtual_ip", + "value": ha_proxy.get("provision_virtual_ip", False), + } + ) + + if ha_proxy.get("write_port"): + action_arguments.append( + {"name": "proxy_write_port", "value": ha_proxy["write_port"]} + ) + + if ha_proxy.get("read_port"): + action_arguments.append( + {"name": "proxy_read_port", "value": ha_proxy["read_port"]} + ) + + action_arguments.append({"name": "deploy_haproxy", "value": True}) + return action_arguments, None diff --git a/plugins/module_utils/ndb/database_instances.py b/plugins/module_utils/ndb/database_instances.py new file mode 100644 index 000000000..77bacde4a --- /dev/null +++ b/plugins/module_utils/ndb/database_instances.py @@ -0,0 +1,422 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from copy import deepcopy + +from ..constants import NDB +from .database_engines.db_engine_factory import create_db_engine, get_engine_type +from .db_server_vm import DBServerVM +from .nutanix_database import NutanixDatabase +from .profiles.profile_types import DatabaseParameterProfile +from .time_machines import TimeMachine + + +class DatabaseInstance(NutanixDatabase): + resource_type = "/databases" + + def __init__(self, module): + + super(DatabaseInstance, self).__init__(module, self.resource_type) + self.build_spec_methods = { + "auto_tune_staging_drive": self._build_spec_auto_tune_staging_drive, + } + + def provision(self, data): + endpoint = "provision" + return self.create(data, endpoint, timeout=60) + + def register(self, data): + endpoint = "register" + return self.create(data, endpoint) + + def scale(self, uuid, data): + endpoint = "update/extend-storage" + return self.update(data=data, uuid=uuid, endpoint=endpoint, method="POST") + + def 
restore(self, uuid, data): + endpoint = "restore" + return self.update(data=data, uuid=uuid, endpoint=endpoint, method="POST") + + def add_databases(self, instance_uuid, data): + endpoint = "linked-databases" + return self.update( + data=data, uuid=instance_uuid, endpoint=endpoint, method="POST" + ) + + def remove_linked_database(self, linked_database_uuid, database_instance_uuid): + spec = {"delete": True, "forced": True} + endpoint = "linked-databases/{0}".format(linked_database_uuid) + return self.delete(uuid=database_instance_uuid, endpoint=endpoint, data=spec) + + def update( + self, + data=None, + uuid=None, + endpoint=None, + query=None, + raise_error=True, + no_response=False, + timeout=30, + method="PATCH", + ): + return super().update( + data, + uuid, + endpoint, + query, + raise_error, + no_response, + timeout, + method=method, + ) + + def get_uuid( + self, + value, + key="name", + data=None, + entity_type=None, + raise_error=True, + no_response=False, + ): + query = {"value-type": key, "value": value} + resp = self.read(query=query) + + if not resp: + return None, "Database instance with name {0} not found.".format(value) + + uuid = resp[0].get("id") + return uuid, None + + @staticmethod + def format_response(response): + """This method formats the response. It removes attributes as per requirement.""" + attrs = [ + "lcmConfig", + "accessLevel", + "category", + "placeholder", + "internal", + "databaseGroupStateInfo", + "databaseClusterType", + "parentTimeMachineId", + "parentSourceDatabaseId", + "ownerId", + "databaseStatus", + "groupInfo", + ] + for attr in attrs: + if attr in response: + response.pop(attr) + + if response.get("metadata") is not None: + response["provisionOperationId"] = response.get("metadata", {}).get( + "provisionOperationId" + ) + response.pop("metadata") + + # format database node's responses + for node in response.get("databaseNodes", []): + DBServerVM.format_response(node) + node.pop("dbserver") + + # format time machine's response + if response.get("timeMachine"): + TimeMachine.format_response(response.get("timeMachine")) + + return response + + def _get_action_argument_spec(self, name, value): + return deepcopy({"name": name, "value": value}) + + def get_default_provision_spec(self): + return deepcopy( + { + "databaseType": None, + "name": None, + "dbParameterProfileId": None, + "actionArguments": [], + "clustered": False, + "autoTuneStagingDrive": True, + "tags": [], + } + ) + + def get_default_registration_spec(self): + return deepcopy( + { + "databaseType": "", + "databaseName": "", + "workingDirectory": "", + "actionArguments": [], + "autoTuneStagingDrive": True, + } + ) + + def get_default_update_spec(self, override_spec=None): + spec = deepcopy( + { + "name": None, + "description": None, + "tags": [], + "resetTags": True, + "resetName": True, + "resetDescription": True, + } + ) + if override_spec: + for key in spec.keys(): + if override_spec.get(key): + spec[key] = deepcopy(override_spec[key]) + + return spec + + def _get_default_scaling_spec(self): + return deepcopy( + { + "actionArguments": [ + {"name": "working_dir", "value": "/tmp"}, + ], + "applicationType": None, + } + ) + + def get_default_restore_spec(self): + return deepcopy( + { + "snapshotId": None, + "latestSnapshot": None, + "userPitrTimestamp": None, + "timeZone": None, + "actionArguments": [{"name": "sameLocation", "value": True}], + } + ) + + def get_database(self, name=None, uuid=None, query=None): + if uuid: + resp = self.read(uuid=uuid, query=query, raise_error=False) + elif 
name: + query_params = {"value-type": "name", "value": name} + if query: + query.update(query_params) + else: + query = query_params + resp = self.read(query=query) + if not resp: + return None, "Database with name {0} not found".format(name) + if isinstance(resp, list): + resp = resp[0] + return resp, None + else: + return ( + None, + "Please provide either uuid or name for fetching database details", + ) + + return resp, None + + def get_spec(self, old_spec=None, params=None, **kwargs): + # handle registration and provisioning from this factory itself + + if kwargs.get("update"): + return self.get_update_spec(old_spec=old_spec, params=params, **kwargs) + elif kwargs.get("provision"): + return self.get_spec_for_provision( + old_spec=old_spec, params=params, **kwargs + ) + elif kwargs.get("register"): + return self.get_spec_for_registration( + old_spec=old_spec, params=params, **kwargs + ) + + return None, "Please provide supported arguments" + + def get_update_spec(self, old_spec=None, params=None, **kwargs): + self.build_spec_methods = { + "name": self.build_spec_name, + "desc": self.build_spec_desc, + } + return super().get_spec(old_spec=old_spec, params=params, **kwargs) + + def get_spec_for_provision(self, old_spec=None, params=None, **kwargs): + self.build_spec_methods = { + "name": self.build_spec_name, + "db_params_profile": self.build_spec_db_params_profile, + "desc": self._build_spec_database_desc, + } + payload, err = super().get_spec(old_spec=old_spec, params=params, **kwargs) + + if self.module.params.get("auto_tune_staging_drive") is not None: + payload["autoTuneStagingDrive"] = self.module.params.get( + "auto_tune_staging_drive" + ) + + return payload, err + + def get_spec_for_registration(self, old_spec=None, params=None, **kwargs): + self.build_spec_methods = { + "working_directory": self._build_spec_register_working_dir, + "name": self._build_spec_register_name, + "desc": self.build_spec_desc, + } + + payload, err = super().get_spec(old_spec=old_spec, params=params, **kwargs) + + if self.module.params.get("auto_tune_staging_drive") is not None: + payload["autoTuneStagingDrive"] = self.module.params.get( + "auto_tune_staging_drive" + ) + + return payload, err + + def get_engine_type(self): + engine_types = NDB.DatabaseTypes.ALL + + for type in engine_types: + if type in self.module.params: + return type, None + + return ( + None, + "Input doesn't contain config for any allowed database engine type", + ) + + def get_db_engine_spec(self, payload, params=None, **kwargs): + + db_engine_type, err = get_engine_type(self.module) + if err: + return None, err + + config = self.module.params.get(db_engine_type) or params + + if not config: + return None, "Input for database engine {0} is missing".format( + db_engine_type + ) + + db_architecture = config.get("type") + + db_engine, err = create_db_engine( + self.module, engine_type=db_engine_type, db_architecture=db_architecture + ) + if err: + return None, err + + if kwargs.get("provision"): + + payload, err = db_engine.build_spec_db_instance_provision_action_arguments( + payload, config + ) + if err: + return None, err + + elif kwargs.get("register"): + payload, err = db_engine.build_spec_db_instance_register_action_arguments( + payload, config + ) + if err: + return None, err + + payload["databaseType"] = db_engine_type + "_database" + return payload, err + + def get_delete_spec(self): + spec = { + "delete": False, + "remove": False, + "softRemove": False, + "deleteTimeMachine": False, + "deleteLogicalCluster": False, + } + + 
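# map module params to NDB delete flags; exactly one of softRemove, delete or remove is set, with plain remove (unregister only) as the default + 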
if self.module.params.get("soft_delete"): + spec["softRemove"] = True + elif self.module.params.get("delete_db_from_vm"): + spec["delete"] = True + else: + spec["remove"] = True + + if self.module.params.get("delete_time_machine"): + spec["deleteTimeMachine"] = True + + return spec + + def get_scaling_spec(self, scale_config, database_type): + config = deepcopy(scale_config) + spec = self._get_default_scaling_spec() + + spec["applicationType"] = database_type + + spec["actionArguments"].append( + self._get_action_argument_spec( + "data_storage_size", int(config.get("storage_gb")) + ) + ) + spec["actionArguments"].append( + self._get_action_argument_spec( + "pre_script_cmd", config.get("pre_update_cmd") + ) + ) + spec["actionArguments"].append( + self._get_action_argument_spec( + "post_script_cmd", config.get("post_update_cmd") + ) + ) + + return spec + + def get_restore_spec(self, restore_config): + spec = self.get_default_restore_spec() + if restore_config.get("snapshot_uuid"): + spec["snapshotId"] = restore_config["snapshot_uuid"] + elif restore_config.get("pitr_timestamp"): + spec["userPitrTimestamp"] = restore_config["pitr_timestamp"] + spec["timeZone"] = restore_config.get("timezone") + else: + spec["latestSnapshot"] = True + + return spec + + def get_add_database_spec(self, database_names): + spec = {"databases": []} + + for name in database_names: + spec["databases"].append({"databaseName": name}) + + return spec + + def build_spec_desc(self, payload, desc): + payload["description"] = desc + return payload, None + + def build_spec_name(self, payload, name): + payload["name"] = name + return payload, None + + def build_spec_db_params_profile(self, payload, db_params_profile): + db_params = DatabaseParameterProfile(self.module) + uuid, err = db_params.get_profile_uuid(db_params_profile) + if err: + return None, err + + payload["dbParameterProfileId"] = uuid + return payload, None + + def _build_spec_database_desc(self, payload, desc): + payload["databaseDescription"] = desc + return payload, None + + def _build_spec_register_name(self, payload, name): + payload["databaseName"] = name + return payload, None + + def _build_spec_register_working_dir(self, payload, working_dir): + payload["workingDirectory"] = working_dir + return payload, None + + def _build_spec_auto_tune_staging_drive(self, payload, value): + payload["autoTuneStagingDrive"] = value + return payload, None diff --git a/plugins/module_utils/ndb/databases.py b/plugins/module_utils/ndb/databases.py deleted file mode 100644 index 9235a4f82..000000000 --- a/plugins/module_utils/ndb/databases.py +++ /dev/null @@ -1,351 +0,0 @@ -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -from copy import deepcopy - -from ..constants import NDB as NDB_CONSTANTS -from .clusters import get_cluster_uuid -from .db_servers import get_db_server_uuid -from .nutanix_database import NutanixDatabase -from .profiles import Profile, get_profile_uuid -from .slas import get_sla_uuid -from .tags import Tag - -__metaclass__ = type - - -class Database(NutanixDatabase): - def __init__(self, module): - resource_type = "/databases" - super(Database, self).__init__(module, resource_type=resource_type) - self.build_spec_methods = { - "name": self._build_spec_name, - "desc": self._build_spec_desc, - "auto_tune_staging_drive": self._build_spec_auto_tune_staging_drive, - "db_params_profile": 
self._build_spec_db_params_profile, - "db_vm": self._build_spec_db_vm, - "time_machine": self._build_spec_time_machine, - "postgres": self._build_spec_postgres, - "tags": self._build_spec_tags, - } - - def get_uuid( - self, - value, - key="name", - data=None, - entity_type=None, - raise_error=True, - no_response=False, - ): - query = {"value-type": key, "value": value} - resp = self.read(query=query) - - if not resp: - return None, "Database instance with name {0} not found.".format(value) - - uuid = resp[0].get("id") - return uuid, None - - def create( - self, - data=None, - endpoint=None, - query=None, - method="POST", - raise_error=True, - no_response=False, - timeout=30, - ): - endpoint = "provision" - return super().create( - data, endpoint, query, method, raise_error, no_response, timeout - ) - - def update( - self, - data=None, - uuid=None, - endpoint=None, - query=None, - raise_error=True, - no_response=False, - timeout=30, - method="PATCH", - ): - return super().update( - data, uuid, endpoint, query, raise_error, no_response, timeout, method - ) - - def get_database(self, name=None, uuid=None): - default_query = {"detailed": True} - if uuid: - resp = self.read(uuid=uuid, query=default_query) - elif name: - query = {"value-type": "name", "value": name} - query.update(deepcopy(default_query)) - resp = self.read(query=query) - if not resp: - return None, "Database with name {0} not found".format(name) - if isinstance(resp, list): - resp = resp[0] - return resp, None - else: - return ( - None, - "Please provide either uuid or name for fetching database details", - ) - - return resp, None - - def _get_default_spec(self): - return deepcopy( - { - "databaseType": None, - "name": None, - "dbParameterProfileId": None, - "timeMachineInfo": { - "name": None, - "slaId": None, - "schedule": {}, - "autoTuneLogDrive": True, - }, - "actionArguments": [], - "nodes": [], - "nodeCount": 1, - "clustered": False, - "autoTuneStagingDrive": True, - "tags": [], - } - ) - - def get_default_update_spec(self, override_spec=None): - spec = deepcopy( - { - "name": None, - "description": None, - "tags": [], - "resetTags": True, - "resetName": True, - "resetDescription": True, - } - ) - if override_spec: - for key in spec.keys(): - if override_spec.get(key): - spec[key] = deepcopy(override_spec[key]) - - return spec - - def get_default_delete_spec(self): - return deepcopy( - { - "delete": False, - "remove": False, - "deleteTimeMachine": False, - "deleteLogicalCluster": True, - } - ) - - def _build_spec_name(self, payload, name): - payload["name"] = name - return payload, None - - def _build_spec_desc(self, payload, desc): - payload["databaseDescription"] = desc - return payload, None - - def _build_spec_auto_tune_staging_drive(self, payload, value): - payload["autoTuneStagingDrive"] = value - return payload, None - - def _build_spec_db_params_profile(self, payload, db_params_profile): - uuid, err = get_profile_uuid( - self.module, "Database_Parameter", db_params_profile - ) - if err: - return None, err - - payload["dbParameterProfileId"] = uuid - return payload, None - - def _build_spec_db_vm(self, payload, db_vm): - if db_vm.get("use_registered_server"): - - uuid, err = get_db_server_uuid(self.module, db_vm["use_registered_server"]) - if err: - return None, err - - payload["createDbserver"] = False - payload["dbserverId"] = uuid - payload["nodes"] = [{"properties": [], "dbserverId": uuid}] - - else: - vm_config = db_vm["create_new_server"] - - # set compute profile - uuid, err = get_profile_uuid( - 
self.module, "Compute", vm_config["compute_profile"] - ) - if err: - return None, err - payload["computeProfileId"] = uuid - - # set software profile - uuid, err = get_profile_uuid( - self.module, "Software", vm_config["software_profile"] - ) - if err: - return None, err - - payload["softwareProfileId"] = uuid - if vm_config["software_profile"].get("version_id"): - payload["softwareProfileVersionId"] = vm_config["software_profile"][ - "version_id" - ] - else: - profiles = Profile(self.module) - software_profile = profiles.read(uuid) - payload["softwareProfileId"] = uuid - payload["softwareProfileVersionId"] = software_profile[ - "latestVersionId" - ] - - # set network prfile - uuid, err = get_profile_uuid( - self.module, "Network", vm_config["network_profile"] - ) - if err: - return None, err - - payload["nodes"] = [ - { - "properties": [], - "vmName": vm_config["name"], - "networkProfileId": uuid, - } - ] - payload["networkProfileId"] = uuid - - # set cluster config - uuid, err = get_cluster_uuid(self.module, vm_config["cluster"]) - if err: - return None, err - payload["nxClusterId"] = uuid - - # set other params - payload["sshPublicKey"] = vm_config["pub_ssh_key"] - payload["vmPassword"] = vm_config["password"] - payload["createDbserver"] = True - - return payload, None - - def _build_spec_time_machine(self, payload, time_machine): - - # set sla uuid - uuid, err = get_sla_uuid(self.module, time_machine["sla"]) - if err: - return None, err - - time_machine_spec = {} - time_machine_spec["slaId"] = uuid - - schedule = time_machine.get("schedule") - schedule_spec = {} - if schedule.get("daily"): - - time = schedule["daily"].split(":") - if len(time) != 3: - return None, "Daily snapshot schedule not in HH:MM:SS format." - - schedule_spec["snapshotTimeOfDay"] = { - "hours": int(time[0]), - "minutes": int(time[1]), - "seconds": int(time[2]), - } - - if schedule.get("weekly"): - schedule_spec["weeklySchedule"] = { - "enabled": True, - "dayOfWeek": schedule["weekly"], - } - - if schedule.get("monthly"): - schedule_spec["monthlySchedule"] = { - "enabled": True, - "dayOfMonth": schedule["monthly"], - } - - # set quaterly and yearly as they are dependent on monthly - if schedule.get("quaterly"): - schedule_spec["quartelySchedule"] = { - "enabled": True, - "startMonth": schedule["quaterly"], - "dayOfMonth": schedule.get("monthly"), - } - - if schedule.get("yearly"): - schedule_spec["yearlySchedule"] = { - "enabled": True, - "month": schedule["yearly"], - "dayOfMonth": schedule.get("monthly"), - } - - if schedule.get("log_catchup") or schedule.get("snapshots_per_day"): - schedule_spec["continuousSchedule"] = { - "enabled": True, - "logBackupInterval": schedule.get("log_catchup"), - "snapshotsPerDay": schedule.get("snapshots_per_day"), - } - - time_machine_spec["schedule"] = schedule_spec - time_machine_spec["name"] = time_machine["name"] - time_machine_spec["description"] = time_machine.get("desc", "") - time_machine_spec["autoTuneLogDrive"] = time_machine.get("auto_tune_log_drive") - payload["timeMachineInfo"] = time_machine_spec - return payload, None - - def _build_spec_postgres(self, payload, postgres): - action_arguments = [] - - # fields to their defaults maps - args = { - "listener_port": "", - "auto_tune_staging_drive": False, - "allocate_pg_hugepage": False, - "cluster_database": False, - "auth_method": "", - "db_password": "", - "pre_create_script": "", - "post_create_script": "", - } - - # create action arguments - for key, value in args.items(): - spec = {"name": key, "value": 
postgres.get(key, value)} - action_arguments.append(spec) - - # handle scenariors where display names are diff - action_arguments.append( - {"name": "database_names", "value": postgres.get("db_name")} - ) - action_arguments.append( - {"name": "database_size", "value": str(postgres.get("db_size"))} - ) - - payload["actionArguments"] = action_arguments - payload["databaseType"] = NDB_CONSTANTS.DatabaseTypes.POSTGRES - return payload, None - - def _build_spec_tags(self, payload, tags): - _tags = Tag(self.module) - name_uuid_map = _tags.get_all_name_uuid_map() - specs = [] - for name, val in tags.items(): - if name not in name_uuid_map: - return None, "Tag with name {0} not found".format(name) - spec = {"tagId": name_uuid_map[name], "tagName": name, "value": val} - specs.append(spec) - payload["tags"] = specs - return payload, None diff --git a/plugins/module_utils/ndb/db_server_cluster.py b/plugins/module_utils/ndb/db_server_cluster.py new file mode 100644 index 000000000..5b0c4b680 --- /dev/null +++ b/plugins/module_utils/ndb/db_server_cluster.py @@ -0,0 +1,167 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +from copy import deepcopy + +from .clusters import Cluster +from .db_server_vm import DBServerVM +from .nutanix_database import NutanixDatabase + +__metaclass__ = type + + +class DBServerCluster(NutanixDatabase): + def __init__(self, module): + resource_type = "/dpcs" + super(DBServerCluster, self).__init__(module, resource_type=resource_type) + self.build_spec_methods = { + "name": self._build_spec_name, + "desc": self._build_spec_desc, + "ips": self._build_spec_ips, + } + + def get_all_clusters_name_uuid_map(self): + resp = self.read() + name_uuid_map = {} + for cluster in resp: + if cluster.get("name") and cluster.get("id"): + name_uuid_map[cluster.get("name")] = cluster.get("id") + + return name_uuid_map + + def get_spec(self, old_spec=None, params=None, **kwargs): + + # if db server vm cluster is required for db instance + if kwargs.get("db_instance_provision"): + if self.module.params.get("db_server_cluster", {}).get("new_cluster"): + payload, err = self.get_spec_provision_for_db_instance(payload=old_spec) + return payload, err + else: + return ( + None, + "Spec builder for DB server cluster registration for HA instance is not implemented", + ) + + elif kwargs.get("db_server_cluster"): + return ( + None, + "Spec builder for DB server cluster provision or register is not implemented", + ) + + return None, "Please provide supported arguments" + + def get_default_delete_spec(self, **kwargs): + delete = kwargs.get("delete", False) + return deepcopy( + { + "delete": delete, + "forced": False, + "softRemove": False, + "remove": not delete, + "dbservers": { + "remove": not delete, + "delete": delete, + "deleteVgs": False, + "deleteVmSnapshots": False, + }, + } + ) + + def get_default_spec_for_db_instance(self): + return deepcopy( + { + "nodes": [{"properties": [], "vmName": "", "networkProfileId": ""}], + "nxClusterId": "", + } + ) + + # this routine populates spec for provisioning db server VM cluster for database instance + def get_spec_provision_for_db_instance(self, payload): + + db_server_vm = DBServerVM(self.module) + + self.build_spec_methods.update( + { + "compute_profile": db_server_vm.build_spec_compute_profile, + "software_profile": db_server_vm.build_spec_software_profile, + "network_profile": 
db_server_vm.build_spec_network_profile, + "password": db_server_vm.build_spec_password, + "cluster": db_server_vm.build_spec_cluster, + "pub_ssh_key": db_server_vm.build_spec_pub_ssh_key, + } + ) + + config = self.module.params.get("db_server_cluster", {}).get("new_cluster", {}) + if not config: + return ( + None, + "'db_server_cluster.new_cluster' is required for creating spec for new db server vm cluster", + ) + + payload, err = super().get_spec(old_spec=payload, params=config) + if err: + return None, err + + # configure spec for group of vms for cluster + # will send defaults in kwargs + kwargs = { + "network_profile_uuid": payload.get("networkProfileId"), + "compute_profile_uuid": payload.get("computeProfileId"), + "cluster_uuid": payload.get("nxClusterId"), + } + payload, err = db_server_vm.build_spec_vms( + payload, config.get("vms", []), **kwargs + ) + if err: + return None, err + + payload["clustered"] = True + payload["createDbserver"] = True + return payload, err + + # builder methods for vm + def _build_spec_name(self, payload, name): + action_arguments = payload.get("actionArguments", []) + action_arguments.append({"name": "cluster_name", "value": name}) + payload["actionArguments"] = action_arguments + return payload, None + + def _build_spec_desc(self, payload, desc): + action_arguments = payload.get("actionArguments", []) + action_arguments.append({"name": "cluster_description", "value": desc}) + payload["actionArguments"] = action_arguments + + return payload, None + + def _build_spec_ips(self, payload, cluster_ip_infos): + cluster = Cluster(self.module) + clusters = cluster.get_all_clusters_name_uuid_map() + + specs = [] + for ip_info in cluster_ip_infos: + spec = { + "ipInfos": [ + {"ipType": "CLUSTER_IP", "ipAddresses": [ip_info.get("ip")]} + ] + } + + # add cluster spec + cluster_uuid = "" + if ip_info["cluster"].get("name"): + if clusters.get(ip_info["cluster"]["name"]): + cluster_uuid = clusters[ip_info["cluster"]["name"]] + else: + return None, "NDB cluster with name '{0}' not found".format( + ip_info["cluster"]["name"] + ) + + elif ip_info["cluster"].get("uuid"): + cluster_uuid = ip_info["cluster"]["uuid"] + + spec["nxClusterId"] = cluster_uuid + specs.append(spec) + + payload["clusterInfo"] = {"clusterIpInfos": specs} + + return payload, None diff --git a/plugins/module_utils/ndb/db_server_vm.py b/plugins/module_utils/ndb/db_server_vm.py new file mode 100644 index 000000000..dee8548ba --- /dev/null +++ b/plugins/module_utils/ndb/db_server_vm.py @@ -0,0 +1,837 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +from copy import deepcopy + +__metaclass__ = type + + +from .clusters import Cluster, get_cluster_uuid +from .database_engines.db_engine_factory import create_db_engine +from .nutanix_database import NutanixDatabase +from .profiles.profile_types import ComputeProfile, NetworkProfile, SoftwareProfile +from .time_machines import TimeMachine + + +class DBServerVM(NutanixDatabase): + def __init__(self, module): + + resource_type = "/dbservers" + super(DBServerVM, self).__init__(module, resource_type=resource_type) + self.build_spec_methods = { + "compute_profile": self.build_spec_compute_profile, + "software_profile": self.build_spec_software_profile, + "network_profile": self.build_spec_network_profile, + "cluster": self.build_spec_cluster, + "password": self.build_spec_password, + } + + def provision(self, data): + 
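# provision and register below go through dedicated sub-endpoints of /dbservers; NDB runs these as async operations, and format_response later surfaces the related provisionOperationId + 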
endpoint = "provision" + return self.create(data, endpoint) + + def register(self, data): + endpoint = "register" + return self.create(data, endpoint) + + def update( + self, + data=None, + uuid=None, + endpoint=None, + query=None, + raise_error=True, + no_response=False, + timeout=30, + method="PATCH", + ): + return super().update( + data, uuid, endpoint, query, raise_error, no_response, timeout, method + ) + + def get_uuid( + self, + value, + key="name", + data=None, + entity_type=None, + raise_error=True, + no_response=False, + ): + query = {"value-type": key, "value": value} + resp = self.read(query=query) + + if not resp: + return None, "DB server vm with name {0} not found.".format(value) + + uuid = resp[0].get("id") + return uuid, None + + def get_all_db_servers_name_uuid_map(self): + resp = self.read() + name_uuid_map = {} + for vm in resp: + if vm.get("name") and vm.get("id"): + name_uuid_map[vm.get("name")] = vm.get("id") + + return name_uuid_map + + def get_db_server(self, name=None, uuid=None, ip=None, query=None): + resp = None + if uuid: + resp = self.read(uuid=uuid, query=query) + elif name or ip: + key = "name" if name else "ip" + val = name if name else ip + query_params = {"value-type": key, "value": val} + if query: + query.update(query_params) + else: + query = query_params + resp = self.read(query=query) + if not resp: + return None, "Database server with {0} {1} not found".format(key, val) + resp = resp[0] + else: + return ( + None, + "Please provide uuid, name or server IP for fetching database server details", + ) + + return resp, None + + def get_db_server_uuid(self, config): + if "name" in config: + name = config["name"] + uuid, err = self.get_uuid(name) + if err: + return None, err + elif "uuid" in config: + uuid = config["uuid"] + else: + error = "Config {0} doesn't have name or uuid key".format(config) + return None, error + + return uuid, None + + @staticmethod + def format_response(response): + """This method formats response of db server vm. 
It removes attributes as per requirement.""" + attrs = [ + "accessKeyId", + "lcmConfig", + "category", + "placeholder", + "accessKey", + "accessLevel", + "ownerId", + "softwareInstallationId", + ] + for attr in attrs: + if attr in response: + response.pop(attr) + + if response.get("metadata") is not None: + response["provisionOperationId"] = response.get("metadata", {}).get( + "provisionOperationId" + ) + + response.pop("metadata") + + return response + + def get_default_spec_for_provision(self): + return deepcopy( + { + "actionArguments": [], + "nxClusterId": "", + "databaseType": "", + "latestSnapshot": False, + "networkProfileId": "", + "softwareProfileId": "", + "softwareProfileVersionId": "", + "computeProfileId": "", + "vmPassword": None, + } + ) + + def get_default_spec_for_registration(self): + return deepcopy( + { + "nxClusterId": "", + "vmIp": "", + "resetDescriptionInNxCluster": False, + "forcedInstall": True, + } + ) + + def get_default_spec_for_update(self, override=None): + spec = { + "name": "", + "description": "", + "resetNameInNxCluster": False, + "resetDescriptionInNxCluster": False, + "resetCredential": False, + "credentials": [], + "resetTags": False, + "resetName": False, + "resetDescription": False, + } + + # populate spec with values from old_spec + if override: + for key in spec: + if key in override: + spec[key] = override[key] + + return spec + + def get_default_delete_spec(self, **kwargs): + delete = kwargs.get("delete", False) + return deepcopy( + { + "softRemove": False, + "remove": not delete, + "delete": delete, + "deleteVgs": False, + "deleteVmSnapshots": False, + } + ) + + def get_spec(self, old_spec=None, params=None, **kwargs): + # if db server vm is required for db instance + if kwargs.get("db_instance_provision"): + if kwargs.get("provision_new_server"): + return self.get_spec_provision_for_db_instance( + old_spec=old_spec, params=params, **kwargs + ) + elif kwargs.get("use_registered_server"): + return self.get_spec_registered_server_for_db_instance_provision( + old_spec=old_spec, params=params, **kwargs + ) + + # if db server vm is required for registering db instance + elif kwargs.get("db_instance_register"): + if kwargs.get("use_registered_server"): + return self.get_spec_registered_vm_for_db_instance_registration( + old_spec=old_spec, params=params, **kwargs + ) + elif kwargs.get("register_server"): + return self.get_spec_register_for_db_instance_registration( + old_spec=old_spec, params=params, **kwargs + ) + + elif kwargs.get("db_clone"): + if kwargs.get("provision_new_server"): + return self.get_spec_provision_for_db_instance( + old_spec=old_spec, params=params, **kwargs + ) + elif kwargs.get("use_authorized_server"): + return self.get_spec_authorized_vm( + old_spec=old_spec, params=params, **kwargs + ) + + # if only db server vm provision or register is required + else: + if kwargs.get("provision_new_server"): + return self.get_spec_provision( + old_spec=old_spec, params=params, **kwargs + ) + elif kwargs.get("register_server"): + return self.get_spec_register( + old_spec=old_spec, params=params, **kwargs + ) + elif kwargs.get("update"): + return self.get_spec_update_vm( + old_spec=old_spec, params=params, **kwargs + ) + elif kwargs.get("delete"): + return self.get_spec_delete_vm( + old_spec=old_spec, params=params, **kwargs + ) + return None, "Please provide supported arguments" + + def get_db_engine_spec(self, payload, params=None, **kwargs): + + db_engine, err = create_db_engine(self.module, db_architecture="single") + if err: + return 
None, err + + db_type = db_engine.get_type() + + config = self.module.params.get(db_type) or params + + if kwargs.get("register"): + payload, err = db_engine.build_spec_db_server_vm_register_action_arguments( + payload, config + ) + if err: + return None, err + payload["databaseType"] = db_type + "_database" + + elif kwargs.get("provision"): + # add db engine specific spec for provisioning vm + pass + + return payload, err + + # this routine populates spec for provisioning db vm for database instance creation + def get_spec_provision_for_db_instance(self, old_spec=None, params=None, **kwargs): + + self.build_spec_methods.update({"pub_ssh_key": self.build_spec_pub_ssh_key}) + + if not params: + params = self.module.params.get("db_vm", {}).get("create_new_server", {}) + + if not params: + return ( + None, + "db server vm input is required for creating spec for new db server vm", + ) + + payload, err = super().get_spec(old_spec=old_spec, params=params, **kwargs) + if err: + return None, err + + # configure vm related spec + kwargs = {"network_profile_uuid": payload["networkProfileId"]} + + payload, err = self.build_spec_vms(payload, [params], **kwargs) + if err: + return None, err + + # add description + payload["actionArguments"].append( + {"name": "dbserver_description", "value": params.get("desc")} + ) + + payload["clustered"] = False + payload["createDbserver"] = True + return payload, err + + # this routine populates spec for registered db vm to host new database instance + def get_spec_registered_server_for_db_instance_provision( + self, old_spec=None, params=None, **kwargs + ): + payload = deepcopy(old_spec) + if not params: + params = self.module.params.get("db_vm", {}).get( + "use_registered_server", {} + ) + + if not params: + return ( + None, + "db server vm input is required for creating spec for registered db server vm", + ) + + uuid, err = self.get_db_server_uuid(params) + if err: + return None, err + + payload["createDbserver"] = False + payload["dbserverId"] = uuid + payload["nodes"] = [{"properties": [], "dbserverId": uuid}] + return payload, None + + # this routine creates spec for provisioning of db server vm + def get_spec_provision(self, old_spec=None, params=None, **kwargs): + + self.build_spec_methods.update( + { + "database_type": self._build_spec_database_type, + "time_machine": self._build_spec_time_machine, + "software_profile": self.build_spec_software_profile, + "time_zone": self._build_spec_time_zone, + "desc": self._build_spec_description, + } + ) + + payload, err = super().get_spec(old_spec=old_spec, params=params, **kwargs) + if err: + return None, err + + # add vm name + if not payload.get("actionArguments"): + payload["actionArguments"] = [] + + payload["actionArguments"].append( + {"name": "vm_name", "value": self.module.params.get("name")} + ) + + # add client public key + payload["actionArguments"].append( + { + "name": "client_public_key", + "value": self.module.params.get("pub_ssh_key"), + } + ) + + return payload, err + + # this routine creates spec for registration of db server vm + def get_spec_register(self, old_spec=None, params=None, **kwargs): + self.build_spec_methods = { + "ip": self._build_spec_register_vm_ip, + "username": self._build_spec_register_username, + "password": self._build_spec_register_password, + "private_ssh_key": self._build_spec_register_private_ssh_key, + "reset_desc_in_ntnx_cluster": self._build_spec_reset_description, + "cluster": self.build_spec_cluster, + "desc": self._build_spec_description, + "working_directory": 
self._build_spec_register_working_dir, + } + + payload, err = super().get_spec(old_spec=old_spec, params=params, **kwargs) + if err: + return None, err + + # field name changes as per api requirements + payload["nxClusterUuid"] = payload.pop("nxClusterId") + + return payload, err + + # this routine creates spec for registration of db server vm for db instance registration + def get_spec_register_for_db_instance_registration( + self, old_spec=None, params=None, **kwargs + ): + + self.build_spec_methods = { + "ip": self._build_spec_register_vm_ip, + "username": self._build_spec_register_vm_username, + "password": self._build_spec_register_vm_password, + "private_ssh_key": self._build_spec_register_vm_private_ssh_key, + "desc": self._build_spec_register_vm_desc, + "reset_desc_in_ntnx_cluster": self._build_spec_reset_description, + "cluster": self.build_spec_cluster, + } + + if not params: + params = self.module.params.get("db_vm", {}).get("unregistered", {}) + + if not params: + return ( + None, + "db server vm input is required for creating spec for registering db server vm", + ) + payload, err = super().get_spec(old_spec=old_spec, params=params, **kwargs) + if err: + return None, err + + action_arguments = payload.get("actionArguments", []) + action_arguments.append({"name": "vmIp", "value": payload.get("vmIp")}) + payload["actionArguments"] = action_arguments + + return payload, err + + # this routine creates spec for registered vm to register db instance from it + def get_spec_registered_vm_for_db_instance_registration( + self, old_spec=None, params=None, **kwargs + ): + payload = deepcopy(old_spec) + if not params: + params = self.module.params.get("db_vm", {}).get("registered", {}) + + if not params: + return ( + None, + "Registered db server vm input is required for creating spec for db server vm", + ) + + # fetch vm ip using name or uuid + if params.get("name") or params.get("uuid"): + + vm_info, err = self.get_db_server( + name=params.get("name"), uuid=params.get("uuid") + ) + if err: + return None, err + + if not vm_info.get("ipAddresses", []): + return None, "No IP address found for given db server vm" + + # pick the first IP of the db server vm for registration + payload["vmIp"] = vm_info["ipAddresses"][0] + + elif params.get("ip"): + payload["vmIp"] = params["ip"] + + else: + return None, "name, uuid or ip is required for registered vm configuration" + + return payload, None + + def get_spec_authorized_vm(self, old_spec=None, params=None, **kwargs): + payload = deepcopy(old_spec) + if not params: + params = self.module.params.get("db_vm", {}).get( + "use_authorized_server", {} + ) + + if not params: + return ( + None, + "Authorized db server vm input is required for creating spec for authorized db server vm", + ) + + time_machine_uuid = kwargs.get("time_machine_uuid") or payload.get( + "timeMachineId" + ) + if not time_machine_uuid: + return ( + None, + "Time machine uuid is required for creating authorized db server vm spec", + ) + + # get db server vm uuid associated with given time machine + time_machine = TimeMachine(self.module) + db_server_vm_uuid, err = time_machine.get_authorized_db_server_vm_uuid( + time_machine_uuid=time_machine_uuid, config=params + ) + if err: + return None, err + + db_server_vm = self.read(uuid=db_server_vm_uuid) + + payload["createDbserver"] = False + payload["dbserverId"] = db_server_vm_uuid + payload["nxClusterId"] = db_server_vm.get("nxClusterId") + payload["nodes"] = [ + { + "vmName": db_server_vm.get("name"), + "properties": [], + "nxClusterId": 
db_server_vm.get("nxClusterId"), + "dbserverId": db_server_vm_uuid, + } + ] + + return payload, None + + def get_spec_update_vm(self, old_spec=None, params=None, **kwargs): + self.build_spec_methods = { + "name": self._build_spec_update_name, + "desc": self._build_spec_update_desc, + "reset_name_in_ntnx_cluster": self._build_spec_update_reset_name_in_ntnx_cluster, + "reset_desc_in_ntnx_cluster": self._build_spec_reset_description, + "update_credentials": self._build_spec_update_credentials, + } + + return super().get_spec(old_spec=old_spec, params=params, **kwargs) + + def get_spec_delete_vm(self, old_spec=None, params=None, **kwargs): + self.build_spec_methods = { + "delete_from_cluster": self._build_spec_delete_from_cluster, + "delete_vm_snapshots": self._build_spec_delete_vm_snapshots, + "delete_vgs": self._build_spec_delete_volume_groups, + "soft_remove": self._build_spec_soft_remove, + } + + return super().get_spec(old_spec=old_spec, params=params, **kwargs) + + # builder methods for vm provisioning + def _build_spec_name(self, payload, name): + if not payload.get("nodes"): + payload["nodes"] = [{}] + + payload["nodes"][0]["vmName"] = name + return payload, None + + def _build_spec_description(self, payload, desc): + payload["description"] = desc + return payload, None + + def _build_spec_database_type(self, payload, db_type): + payload["databaseType"] = db_type + return payload, None + + def build_spec_compute_profile(self, payload, profile): + # set compute profile + compute_profile = ComputeProfile(self.module) + uuid, err = compute_profile.get_profile_uuid(profile) + if err: + return None, err + payload["computeProfileId"] = uuid + return payload, None + + def build_spec_software_profile(self, payload, profile): + # set software profile + software_profile = SoftwareProfile(self.module) + uuid, err = software_profile.get_profile_uuid(profile) + if err: + return None, err + + payload["softwareProfileId"] = uuid + if profile.get("version_uuid"): + payload["softwareProfileVersionId"] = profile["version_uuid"] + else: + software_profile = software_profile.read(uuid) + payload["softwareProfileId"] = uuid + payload["softwareProfileVersionId"] = software_profile["latestVersionId"] + + return payload, None + + def build_spec_network_profile(self, payload, profile): + # set network prfile + network_profile = NetworkProfile(self.module) + uuid, err = network_profile.get_profile_uuid(profile) + if err: + return None, err + + payload["networkProfileId"] = uuid + return payload, None + + def build_spec_cluster(self, payload, cluster): + # set cluster config + uuid, err = get_cluster_uuid(self.module, cluster) + if err: + return None, err + payload["nxClusterId"] = uuid + return payload, None + + def build_spec_pub_ssh_key(self, payload, pub_ssh_key): + payload["sshPublicKey"] = pub_ssh_key + return payload, None + + def _build_spec_time_machine(self, payload, time_machine): + _time_machine = TimeMachine(self.module) + uuid, err = _time_machine.get_time_machine_uuid(time_machine) + if err: + return None, err + + payload["timeMachineId"] = uuid + if time_machine.get("snapshot_uuid"): + payload["snapshotId"] = time_machine.get("snapshot_uuid") + payload["latestSnapshot"] = False + else: + payload["latestSnapshot"] = True + + return payload, None + + def _build_spec_time_zone(self, payload, time_zone): + payload["timeZone"] = time_zone + return payload, None + + def _build_spec_ip(self, payload, ip): + payload["ipInfos"] = [{"ipType": "VM_IP", "ipAddresses": [ip]}] + return payload, None + + def 
build_spec_password(self, payload, password): + payload["vmPassword"] = password + return payload, None + + def build_spec_vms(self, payload, vms, **kwargs): # noqa: C901 + """ + This method takes a list of vm inputs and creates a spec for each. + Pass acceptable defaults in kwargs. + """ + nodes = payload.get("nodes", []) + + # get cluster uuid map to assign cluster uuid for each node vm + cluster = Cluster(self.module) + clusters = cluster.get_all_clusters_name_uuid_map() + + # spec with default values + spec = { + "properties": [], + "vmName": "", + "networkProfileId": kwargs.get("network_profile_uuid", ""), + "computeProfileId": kwargs.get("compute_profile_uuid", ""), + "nxClusterId": kwargs.get("cluster_uuid", ""), + } + + # get all network profile name uuid map + network_profile = NetworkProfile(self.module) + network_profiles, err = network_profile.get_all_name_uuid_map() + if err: + return None, err + + # get all compute profile name uuid map + compute_profile = ComputeProfile(self.module) + compute_profiles, err = compute_profile.get_all_name_uuid_map() + if err: + return None, err + + for vm in vms: + + node = deepcopy(spec) + node["vmName"] = vm.get("name") + + properties = ["role", "node_type"] + + for prop in properties: + if prop in vm: + node["properties"].append({"name": prop, "value": vm[prop]}) + + if vm.get("archive_log_destination"): + node["properties"].append( + { + "name": "remote_archive_destination", + "value": vm.get("archive_log_destination"), + } + ) + + # add network profile for a vm if required + if vm.get("network_profile"): + uuid = "" + if vm["network_profile"].get("name"): + if network_profiles.get(vm["network_profile"]["name"]): + uuid = network_profiles[vm["network_profile"]["name"]] + else: + return None, "Network profile with name '{0}' not found".format( + vm["network_profile"]["name"] + ) + + elif vm["network_profile"].get("uuid"): + uuid = vm["network_profile"]["uuid"] + + node["networkProfileId"] = uuid + + # add compute profile for a vm if required + if vm.get("compute_profile"): + uuid = "" + if vm["compute_profile"].get("name"): + if compute_profiles.get(vm["compute_profile"]["name"]): + uuid = compute_profiles[vm["compute_profile"]["name"]] + else: + return None, "Compute profile with name '{0}' not found".format( + vm["compute_profile"]["name"] + ) + + elif vm["compute_profile"].get("uuid"): + uuid = vm["compute_profile"]["uuid"] + + node["computeProfileId"] = uuid + + # add cluster spec for a vm + if vm.get("cluster"): + cluster_uuid = "" + if vm["cluster"].get("name"): + if clusters.get(vm["cluster"]["name"]): + cluster_uuid = clusters[vm["cluster"]["name"]] + else: + return None, "NDB cluster with name '{0}' not found".format( + vm["cluster"]["name"] + ) + + elif vm["cluster"].get("uuid"): + cluster_uuid = vm["cluster"]["uuid"] + + node["nxClusterId"] = cluster_uuid + + if vm.get("ip"): + node, err = self._build_spec_ip(node, vm.get("ip")) + if err: + return None, err + + nodes.append(node) + + payload["nodes"] = nodes + payload["nodeCount"] = len(payload["nodes"]) + return payload, None + + # builders for registration + def _build_spec_register_vm_ip(self, payload, ip): + payload["vmIp"] = ip + return payload, None + + def _build_spec_register_vm_username(self, payload, username): + payload["vmUsername"] = username + return payload, None + + def _build_spec_register_vm_password(self, payload, password): + payload["vmPassword"] = password + return payload, None + + def _build_spec_register_vm_private_ssh_key(self, payload, private_ssh_key): + 
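# the db instance registration flow expects the key under "vmSshkey", unlike "sshPrivateKey" used by the standalone db server vm registration builder below + 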
payload["vmSshkey"] = private_ssh_key + return payload, None + + def _build_spec_register_vm_desc(self, payload, desc): + payload["vmDescription"] = desc + return payload, None + + def _build_spec_register_username(self, payload, username): + payload["username"] = username + return payload, None + + def _build_spec_register_password(self, payload, password): + payload["password"] = password + return payload, None + + def _build_spec_register_private_ssh_key(self, payload, private_ssh_key): + payload["sshPrivateKey"] = private_ssh_key + return payload, None + + def _build_spec_reset_description(self, payload, reset_desc): + payload["resetDescriptionInNxCluster"] = reset_desc + return payload, None + + def _build_spec_register_working_dir(self, payload, working_dir): + payload["workingDirectory"] = working_dir + return payload, None + + def _build_spec_update_reset_name_in_ntnx_cluster(self, payload, reset): + payload["resetNameInNxCluster"] = reset + return payload, None + + def _build_spec_update_name(self, payload, name): + if name != payload.get("name", ""): + payload["name"] = name + payload["resetName"] = True + return payload, None + + def _build_spec_update_desc(self, payload, desc): + if desc != payload.get("desc", ""): + payload["description"] = desc + payload["resetDescription"] = True + return payload, None + + def _build_spec_update_credentials(self, payload, credentials): + payload["credentials"] = credentials + payload["resetCredential"] = True + return payload, None + + # builders for deleting vm spec + def _build_spec_delete_from_cluster(self, payload, delete_from_cluster): + payload["delete"] = delete_from_cluster + return payload, None + + def _build_spec_delete_volume_groups(self, payload, delete_volume_groups): + payload["deleteVgs"] = delete_volume_groups + return payload, None + + def _build_spec_soft_remove(self, payload, soft_remove): + payload["softRemove"] = soft_remove + payload["remove"] = False + return payload, None + + def _build_spec_delete_vm_snapshots(self, payload, delete_vm_snapshots): + payload["deleteVmSnapshots"] = delete_vm_snapshots + return payload, None + + def resolve_uuids_from_entity_specs(self, vms): + """ + This helper creates list of uuids from list of db server vms config (containing either name or uuid) + """ + vms_name_uuid_map = self.get_all_db_servers_name_uuid_map() + + uuids = [] + for vm in vms: + + if vm.get("name"): + if vms_name_uuid_map.get(vm["name"]): + uuid = vms_name_uuid_map[vm["name"]] + else: + return None, "DB server vm with name '{0}' not found".format( + vm["name"] + ) + + elif vm.get("uuid"): + uuid = vm["uuid"] + else: + return None, "uuid or name is required for setting db server vm" + + uuids.append(uuid) + + return uuids, None diff --git a/plugins/module_utils/ndb/db_servers.py b/plugins/module_utils/ndb/db_servers.py deleted file mode 100644 index e5e7d0435..000000000 --- a/plugins/module_utils/ndb/db_servers.py +++ /dev/null @@ -1,71 +0,0 @@ -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -from .nutanix_database import NutanixDatabase - - -class DBServers(NutanixDatabase): - def __init__(self, module): - resource_type = "/dbservers" - super(DBServers, self).__init__(module, resource_type=resource_type) - - def get_uuid( - self, - value, - key="name", - data=None, - entity_type=None, - raise_error=True, - no_response=False, - ): - query = 
{"value-type": key, "value": value} - resp = self.read(query=query) - - if not resp: - return None, "DB server vm with name {0} not found.".format(value) - - uuid = resp[0].get("id") - return uuid, None - - def get_db_server(self, name=None, uuid=None, ip=None): - resp = None - if uuid: - resp = self.read(uuid=uuid) - elif name or ip: - key = "name" if name else "ip" - val = name if name else ip - query = {"value-type": key, "value": val} - resp = self.read(query=query) - if not resp: - return None, "Database server with {0} {1} not found".format(key, val) - resp = resp[0] - else: - return ( - None, - "Please provide uuid, name or server IP for fetching database server details", - ) - - return resp, None - - -# Helper functions - - -def get_db_server_uuid(module, config): - if "name" in config: - db_servers = DBServers(module) - name = config["name"] - uuid, err = db_servers.get_uuid(name) - if err: - return None, err - elif "uuid" in config: - uuid = config["uuid"] - else: - error = "Config {0} doesn't have name or uuid key".format(config) - return None, error - - return uuid, None diff --git a/plugins/module_utils/ndb/maintenance_window.py b/plugins/module_utils/ndb/maintenance_window.py new file mode 100644 index 000000000..503ea73ba --- /dev/null +++ b/plugins/module_utils/ndb/maintenance_window.py @@ -0,0 +1,252 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +from copy import deepcopy + +__metaclass__ = type + + +from .db_server_cluster import DBServerCluster +from .db_server_vm import DBServerVM +from .nutanix_database import NutanixDatabase + + +class MaintenanceWindow(NutanixDatabase): + def __init__(self, module): + resource_type = "/maintenance" + super(MaintenanceWindow, self).__init__(module, resource_type=resource_type) + self.build_spec_methods = { + "name": self._build_spec_name, + "desc": self._build_spec_desc, + "schedule": self._build_spec_schedule, + } + + def update( + self, + data=None, + uuid=None, + endpoint=None, + query=None, + raise_error=True, + no_response=False, + timeout=30, + method="PATCH", + ): + return super().update( + data, uuid, endpoint, query, raise_error, no_response, timeout, method + ) + + def update_tasks(self, data): + endpoint = "tasks" + return super().create(data=data, endpoint=endpoint) + + def get_uuid( + self, + value, + key="name", + data=None, + entity_type=None, + raise_error=True, + no_response=False, + ): + resp = self.read() + for entity in resp: + if entity.get(key) == value: + return entity.get("id"), None + + return None, "Maintenance window with name {0} not found.".format(value) + + def get_maintenance_window_uuid(self, config): + if "name" in config: + name = config["name"] + uuid, err = self.get_uuid(value=name) + if err: + return None, err + elif "uuid" in config: + uuid = config["uuid"] + else: + error = "Config {0} doesn't have name or uuid key".format(config) + return None, error + + return uuid, None + + def get_spec(self, old_spec=None, params=None, **kwargs): + if kwargs.get("configure_automated_patching"): + return self.get_spec_for_automated_patching( + old_spec=old_spec, params=params, **kwargs + ) + + else: + return super().get_spec(old_spec=old_spec, params=params, **kwargs) + + def _get_default_spec(self): + return deepcopy({"name": "", "description": "", "timezone": "", "schedule": {}}) + + def get_default_update_spec(self, override_spec=None): + spec = { + "name": 
"", + "description": "", + "timezone": "", + "schedule": {}, + "resetSchedule": True, + "resetDescription": True, + "resetName": True, + } + if override_spec: + for key in spec: + if key in override_spec: + spec[key] = override_spec[key] + + return spec + + def get_default_automated_patching_spec(self): + return deepcopy({"maintenanceWindowId": "", "tasks": []}) + + def get_spec_for_automated_patching(self, old_spec=None, params=None, **kwargs): + config = params or self.module.params.get("automated_patching", {}) + + payload = old_spec + if not payload: + payload = self.get_default_automated_patching_spec() + + self.build_spec_methods = { + "maintenance_window": self._build_spec_maintenance_window, + "tasks": self._build_spec_tasks, + "db_server_vms": self._build_spec_db_server_vms, + "db_server_clusters": self._build_spec_db_server_clusters, + } + return super().get_spec(old_spec=payload, params=config, **kwargs) + + def _build_spec_name(self, payload, name): + + payload["name"] = name + return payload, None + + def _build_spec_desc(self, payload, desc): + + payload["description"] = desc + return payload, None + + def _build_spec_schedule(self, payload, schedule): + spec = payload.get("schedule", {}) + + if schedule.get("recurrence"): + spec["recurrence"] = schedule.get("recurrence").upper() + + if schedule.get("day_of_week"): + spec["dayOfWeek"] = schedule.get("day_of_week").upper() + + if schedule.get("day_of_week"): + spec["weekOfMonth"] = schedule.get("week_of_month") + + if schedule.get("duration"): + spec["duration"] = schedule.get("duration") + + if schedule.get("start_time"): + spec["startTime"] = schedule.get("start_time") + + payload["schedule"] = spec + + payload["timezone"] = schedule.get("timezone") + return payload, None + + # builders for configuring automated patching + def _build_spec_maintenance_window(self, payload, mw): + uuid, err = self.get_maintenance_window_uuid(config=mw) + if err: + return None, err + payload["maintenanceWindowId"] = uuid + return payload, err + + def _build_spec_tasks(self, payload, tasks): + specs = payload.get("tasks", []) + for task in tasks: + spec = {} + + if task.get("type"): + spec["taskType"] = task.get("type") + else: + return ( + None, + "'type' is required for setting task type in automated patching", + ) + + # set pre post commands + spec["payload"] = {"prePostCommand": {}} + if task.get("pre_task_cmd"): + spec["payload"]["prePostCommand"]["preCommand"] = task.get( + "pre_task_cmd" + ) + if task.get("post_task_cmd"): + spec["payload"]["prePostCommand"]["postCommand"] = task.get( + "post_task_cmd" + ) + + specs.append(spec) + + payload["tasks"] = specs + return payload, None + + def _build_spec_db_server_vms(self, payload, vms): + db_server_vms = DBServerVM(self.module) + uuids, err = db_server_vms.resolve_uuids_from_entity_specs(vms=vms) + if err: + return None, err + + if not payload.get("entities"): + payload["entities"] = {} + payload["entities"]["ERA_DBSERVER"] = uuids + return payload, None + + def _build_spec_db_server_clusters(self, payload, clusters): + db_server_clusters = DBServerCluster(self.module) + cluster_name_uuid_map = db_server_clusters.get_all_clusters_name_uuid_map() + + uuids = [] + for cluster in clusters: + + if cluster.get("name"): + if cluster_name_uuid_map.get(cluster["name"]): + uuid = cluster_name_uuid_map[cluster["name"]] + else: + return None, "DB server cluster with name '{0}' not found".format( + cluster["name"] + ) + + elif cluster.get("uuid"): + uuid = cluster["uuid"] + else: + return ( + None, + 
"uuid or name is required for setting db server cluster uuid", + ) + + uuids.append(uuid) + + if not payload.get("entities"): + payload["entities"] = {} + payload["entities"]["ERA_DBSERVER_CLUSTER"] = uuids + return payload, None + + +class AutomatedPatchingSpec: + mutually_exclusive = [("name", "uuid")] + + entity_by_spec = dict(name=dict(type="str"), uuid=dict(type="str")) + + task = dict( + type=dict(type="str", choices=["OS_PATCHING", "DB_PATCHING"], required=False), + pre_task_cmd=dict(type="str", required=False), + post_task_cmd=dict(type="str", required=False), + ) + + automated_patching_argument_spec = dict( + maintenance_window=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + tasks=dict(type="list", elements="dict", options=task, required=False), + ) diff --git a/plugins/module_utils/ndb/operations.py b/plugins/module_utils/ndb/operations.py index 0423dd2e7..3ca60c6f4 100644 --- a/plugins/module_utils/ndb/operations.py +++ b/plugins/module_utils/ndb/operations.py @@ -16,14 +16,18 @@ def __init__(self, module): resource_type = "/operations" super(Operation, self).__init__(module, resource_type=resource_type) - def wait_for_completion(self, uuid, raise_error=True): - delay = NDB.OPERATIONS_POLLING_DELAY + def wait_for_completion( + self, uuid, raise_error=True, delay=NDB.OPERATIONS_POLLING_DELAY + ): timeout = time.time() + self.module.params["timeout"] resp = None while True: resp = self.read(uuid) status = resp.get("status") - if status == NDB.StatusCodes.SUCCESS: + if ( + status == NDB.StatusCodes.SUCCESS + or status == NDB.StatusCodes.COMPLETED_WITH_WARNING + ): return resp elif status == NDB.StatusCodes.FAILURE: if not raise_error: diff --git a/plugins/module_utils/ndb/profiles.py b/plugins/module_utils/ndb/profiles.py deleted file mode 100644 index 0e2f29d8f..000000000 --- a/plugins/module_utils/ndb/profiles.py +++ /dev/null @@ -1,94 +0,0 @@ -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -from .nutanix_database import NutanixDatabase - - -class Profile(NutanixDatabase): - types = ["Database_Parameter", "Compute", "Network", "Software"] - - def __init__(self, module): - resource_type = "/profiles" - super(Profile, self).__init__(module, resource_type=resource_type) - - def get_profile_uuid(self, type, name): - if type not in self.types: - return None, "{0} is not a valid type. 
Allowed types are {1}".format( - type, self.types - ) - query = {"type": type, "name": name} - resp = self.read(query=query) - uuid = resp.get("id") - return uuid - - def read( - self, - uuid=None, - endpoint=None, - query=None, - raise_error=True, - no_response=False, - timeout=30, - ): - if uuid: - if not query: - query = {} - query["id"] = uuid - return super().read( - uuid=None, - endpoint=endpoint, - query=query, - raise_error=raise_error, - no_response=no_response, - timeout=timeout, - ) - - def get_profile_by_version(self, uuid, version_id="latest"): - endpoint = "{0}/versions/{1}".format(uuid, version_id) - resp = self.read(endpoint=endpoint) - return resp - - def get_profiles(self, uuid=None, name=None, type=None): - if name or uuid: - query = {} - if name: - query["name"] = name - else: - query["id"] = uuid - - if type: - query["type"] = type - - resp = self.read(query=query) - elif type: - query = {"type": type} - resp = self.read(query=query) - if not resp: - return None, "Profiles with type {0} not found".format(type) - else: - return ( - None, - "Please provide uuid, name or profile type for fetching profile details", - ) - - return resp, None - - -# helper functions - - -def get_profile_uuid(module, type, config): - uuid = "" - if config.get("name"): - profiles = Profile(module) - uuid = profiles.get_profile_uuid(type, config["name"]) - elif config.get("uuid"): - uuid = config["uuid"] - else: - error = "Profile config {0} doesn't have name or uuid key".format(config) - return error, None - return uuid, None diff --git a/plugins/module_utils/ndb/profiles/profile_types.py b/plugins/module_utils/ndb/profiles/profile_types.py new file mode 100644 index 000000000..19cccbc4d --- /dev/null +++ b/plugins/module_utils/ndb/profiles/profile_types.py @@ -0,0 +1,502 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +from copy import deepcopy + +__metaclass__ = type + + +from ...constants import NDB +from ..clusters import Cluster, get_cluster_uuid +from ..database_engines.db_engine_factory import create_db_engine +from .profiles import Profile + + +class ComputeProfile(Profile): + def __init__(self, module): + super(ComputeProfile, self).__init__(module) + self._type = NDB.ProfileTypes.COMPUTE + + def get_create_profile_spec(self, old_spec=None, params=None, **kwargs): + + payload, err = super().get_create_profile_spec( + old_spec=old_spec, params=params, **kwargs + ) + if err: + return None, err + + compute = self.module.params.get("compute", {}) + return self._build_spec_create_profile(payload, compute) + + def get_update_version_spec(self, old_spec=None, params=None, **kwargs): + payload = deepcopy(old_spec) + + if not params: + params = self.module.params.get("compute") + + if params: + payload, err = self._build_spec_update_profile(payload, params) + if err: + return None, err + + # remove not required fields + if payload.get("type") is not None: + payload.pop("type") + if payload.get("topology") is not None: + payload.pop("topology") + if payload.get("versionClusterAssociation") is not None: + payload.pop("versionClusterAssociation") + + return self.build_spec_status(payload, params) + + def _build_spec_create_profile(self, payload, compute=None): + properties = payload.get("properties", []) + + property_map = { + "vcpus": {"name": "CPUS", "default": "1"}, + "cores_per_cpu": {"name": "CORE_PER_CPU", "default": "2"}, + "memory": {"name": 
"MEMORY_SIZE", "default": "16"}, + } + + for key, config in property_map.items(): + name = config.get("name") + default = config.get("default") + + # fetch value from input or use default + val = str(compute.get(key, default)) + properties.append(self.get_property_spec(name, val)) + + payload["properties"] = properties + return payload, None + + def _build_spec_update_profile(self, payload, compute): + + properties_map = deepcopy(payload["propertiesMap"]) + if compute.get("vcpus"): + properties_map["CPUS"] = str(compute["vcpus"]) + + if compute.get("cores_per_cpu"): + properties_map["CORE_PER_CPU"] = str(compute["cores_per_cpu"]) + + if compute.get("memory"): + properties_map["MEMORY_SIZE"] = str(compute["memory"]) + + properties = payload.get("properties", []) + + # update existing properties with new values + for prop in properties: + prop["value"] = properties_map.get(prop["name"]) + + return payload, None + + +class NetworkProfile(Profile): + def __init__(self, module): + super(NetworkProfile, self).__init__(module) + self._type = NDB.ProfileTypes.NETWORK + + def get_default_version_update_spec(self, override_spec=None): + spec = { + "name": "", + "description": "", + "published": None, + "properties": [], + "propertiesMap": {}, + "topology": "", + "engineType": "", + } + + for key in spec: + if key in override_spec: + spec[key] = override_spec[key] + + return spec + + def get_create_profile_spec(self, old_spec=None, params=None, **kwargs): + self.build_spec_methods.update({"network": self._build_spec_create_profile}) + return super().get_create_profile_spec( + old_spec=old_spec, params=params, **kwargs + ) + + def get_update_version_spec(self, old_spec=None, params=None, **kwargs): + self.build_spec_methods.update( + {"network": self._build_spec_update_profile_version} + ) + payload, err = super().get_spec(old_spec=old_spec, params=params) + if err: + return None, err + + return payload, None + + def _build_spec_create_profile(self, payload, profile): + vlans = profile.get("vlans") + + if profile.get("topology") == "cluster" or len(vlans) > 1: + payload, err = self._build_spec_multi_networks(payload, vlans) + if err: + return None, err + payload["propertiesMap"]["NUM_CLUSTERS"] = len(vlans) + + else: + payload, err = self._build_spec_single_network(payload, vlans[0]) + if err: + return None, err + + payload["propertiesMap"]["ENABLE_IP_ADDRESS_SELECTION"] = str( + profile.get("enable_ip_address_selection", False) + ).lower() + + properties = [] + properties_map = payload.pop("propertiesMap") + for name, val in properties_map.items(): + properties.append({"name": name, "value": val}) + + payload["properties"] = properties + + topology = profile.get("topology") + if topology == "all": + topology = "ALL" + + payload["topology"] = topology + + return payload, None + + def _build_spec_update_profile_version(self, payload, profile): + vlans = profile.get("vlans") + + enable_ip_address_selection = payload.get("propertiesMap", {}).get( + "ENABLE_IP_ADDRESS_SELECTION", False + ) + + if vlans: + payload["propertiesMap"] = {} + err = None + if payload.get("topology") == "cluster": + payload, err = self._build_spec_multi_networks(payload, vlans) + payload["propertiesMap"]["NUM_CLUSTERS"] = len(vlans) + else: + payload, err = self._build_spec_single_network(payload, vlans[0]) + if err: + return None, err + + if profile.get("enable_ip_address_selection") is not None: + enable_ip_address_selection = str( + profile.get("enable_ip_address_selection", False) + ).lower() + payload["propertiesMap"][ + 
"ENABLE_IP_ADDRESS_SELECTION" + ] = enable_ip_address_selection + + properties = [] + properties_map = payload.pop("propertiesMap") + for name, val in properties_map.items(): + properties.append({"name": name, "value": val}) + + payload["properties"] = properties + return self.build_spec_status(payload, profile) + + def _build_spec_single_network(self, payload, vlan): + cluster = vlan.get("cluster") + cluster_uuid, err = get_cluster_uuid(self.module, cluster) + if err: + return None, err + properties_map = payload.get("propertiesMap", {}) + + properties_map["VLAN_NAME"] = vlan.get("vlan_name") + payload["propertiesMap"] = properties_map + + payload["versionClusterAssociation"] = [{"nxClusterId": cluster_uuid}] + + return payload, None + + def _build_spec_multi_networks(self, payload, vlans): + _clusters = Cluster(self.module) + clusters_uuid_name_map = _clusters.get_all_clusters_uuid_name_map() + clusters_name_uuid_map = _clusters.get_all_clusters_name_uuid_map() + properties_map = payload.get("propertiesMap", {}) + for i in range(len(vlans)): + + properties_map["VLAN_NAME_" + str(i)] = vlans[i].get("vlan_name") + + cluster_name = vlans[i].get("cluster", {}).get("name") + cluster_uuid = vlans[i].get("cluster", {}).get("uuid") + + if cluster_uuid and not cluster_name: + if not clusters_uuid_name_map.get(cluster_uuid): + return None, "Cluster with uuid {0} not found".format(cluster_uuid) + cluster_name = clusters_uuid_name_map[cluster_uuid] + + if not cluster_name: + return None, "Pleae provide uuid or name for getting cluster info" + + properties_map["CLUSTER_NAME_" + str(i)] = cluster_name + properties_map["CLUSTER_ID_" + str(i)] = clusters_name_uuid_map[ + cluster_name + ] + payload["propertiesMap"] = properties_map + return payload, None + + +class SoftwareProfile(Profile): + def __init__(self, module): + super(SoftwareProfile, self).__init__(module) + self._type = NDB.ProfileTypes.SOFTWARE + + def get_create_profile_spec(self, old_spec=None, params=None, **kwargs): + self.build_spec_methods.update( + { + "software": self._build_spec_profile, + "clusters": self._build_spec_clusters_availibilty, + } + ) + payload, err = super().get_create_profile_spec( + old_spec=old_spec, params=params, **kwargs + ) + if err: + return None, err + + if payload.get("updateClusterAvailability"): + payload.pop("updateClusterAvailability") + + return payload, None + + def get_update_profile_spec(self, old_spec=None, params=None, **kwargs): + + self.build_spec_methods.update( + {"clusters": self._build_spec_clusters_availibilty} + ) + payload, err = super().get_update_profile_spec(old_spec, params, **kwargs) + if err: + return None, err + + return payload, None + + def get_create_version_spec(self, old_spec=None, params=None, **kwargs): + if not params: + params = self.module.params.get("software") + + if not params: + return None, "Please provide version config for creating new version" + + params["database_type"] = self.module.params.get("database_type") + payload, err = super().get_spec(old_spec=old_spec, params=params) + if err: + return None, err + + payload["type"] = self._type + + # add new base version related properties + payload, err = self._build_spec_version_create_properties(payload, params) + if err: + return None, err + + topology = self.module.params["software"].get("topology") + if topology == "all": + topology = "ALL" + + payload["topology"] = topology + + return payload, None + + def get_update_version_spec(self, old_spec=None, params=None, **kwargs): + + if not params: + params = 
+ def get_update_version_spec(self, old_spec=None, params=None, **kwargs): + + if not params: + params = self.module.params.get("software") + + params["database_type"] = self.module.params.get("database_type") + + payload, err = super().get_spec(old_spec=old_spec, params=params) + if err: + return None, err + + # update version status and return + return self.build_spec_status(payload, params) + + def _build_spec_profile(self, payload, profile): + + payload, err = self._build_spec_version_create_properties( + payload, profile, base_version=True + ) + if err: + return None, err + + topology = profile.get("topology") + if topology == "all": + topology = "ALL" + + payload["topology"] = topology + return payload, None + + def _build_spec_version_create_properties( + self, payload, version, base_version=False + ): + properties = payload.get("properties", []) + if base_version: + if version.get("name"): + properties.append( + self.get_property_spec("BASE_PROFILE_VERSION_NAME", version["name"]) + ) + if version.get("desc"): + properties.append( + self.get_property_spec( + "BASE_PROFILE_VERSION_DESCRIPTION", version["desc"] + ) + ) + + if version.get("notes", {}).get("os"): + properties.append( + self.get_property_spec("OS_NOTES", version["notes"]["os"]) + ) + + if version.get("notes", {}).get("db_software"): + properties.append( + self.get_property_spec( + "DB_SOFTWARE_NOTES", version["notes"]["db_software"] + ) + ) + + if version.get("db_server_vm"): + # importing here to avoid a circular import + from ..db_server_vm import DBServerVM + + db_server_vm = DBServerVM(self.module) + uuid, err = db_server_vm.get_db_server_uuid(version["db_server_vm"]) + if err: + return None, err + properties.append(self.get_property_spec("SOURCE_DBSERVER_ID", uuid)) + + payload["properties"] = properties + return payload, None + + def _build_spec_clusters_availability(self, payload, clusters): + _clusters = Cluster(self.module) + spec = [] + clusters_name_uuid_map = _clusters.get_all_clusters_name_uuid_map() + for cluster in clusters: + uuid = "" + if cluster.get("name"): + uuid = clusters_name_uuid_map.get(cluster["name"]) + if not uuid: + return None, "Cluster with name {0} not found".format( + cluster["name"] + ) + else: + uuid = cluster["uuid"] + + spec.append(uuid) + payload["availableClusterIds"] = spec + payload["updateClusterAvailability"] = True + return payload, None + + def build_spec_status(self, payload, params): + if params.get("publish") is not None: + payload["published"] = params.get("publish") + payload["deprecated"] = False + + elif params.get("deprecate") is not None: + payload["deprecated"] = params.get("deprecate") + + return payload, None + + class DatabaseParameterProfile(Profile): + def __init__(self, module): + self._type = NDB.ProfileTypes.DB_PARAMS + super(DatabaseParameterProfile, self).__init__(module) + + def get_db_engine_spec(self, payload=None, params=None, **kwargs): + engine_type = kwargs.get("engine_type") + if not engine_type: + engine_type = self.module.params.get("database_type") + + db_engine, err = create_db_engine(self.module, engine_type=engine_type) + if err: + return None, err + + engine_type = db_engine.get_type() + + config = {} + if params: + config = params.get(engine_type, {}) + + if not payload: + payload = {} + + if kwargs.get("create_profile"): + payload, err = db_engine.build_spec_create_db_params_profile_properties( + payload, config + ) + if err: + return None, err + elif kwargs.get("update_version"): + payload, err = db_engine.build_spec_update_db_params_profile_version( + payload, config + ) + if err: + return None, err + + return payload, None + + def 
get_create_profile_spec(self, old_spec=None, params=None, **kwargs): + payload, err = super().get_create_profile_spec(old_spec, params, **kwargs) + if err: + return None, err + + if not params: + params = self.module.params.get("database_parameter", {}) + return self.get_db_engine_spec( + payload=payload, params=params, create_profile=True + ) + + def get_update_version_spec(self, old_spec=None, params=None, **kwargs): + payload = deepcopy(old_spec) + + if not params: + params = self.module.params.get("database_parameter") + + kwargs["update_version"] = True + payload, err = self.get_db_engine_spec(payload=payload, params=params, **kwargs) + if err: + return None, err + + if payload.get("type") is not None: + payload.pop("type") + if payload.get("topology") is not None: + payload.pop("topology") + if payload.get("versionClusterAssociation") is not None: + payload.pop("versionClusterAssociation") + + return self.build_spec_status(payload, params) + + # Helper methods for getting profile type objects + + def get_profile_type(module): + profile_types = NDB.ProfileTypes.ALL + + for type in profile_types: + if type in module.params: + return type, None + + return None, "Input doesn't contain config for any allowed profile type" + + def get_profile_type_obj(module, profile_type=None): # -> tuple[Profile, str]: + profiles = { + "software": SoftwareProfile, + "network": NetworkProfile, + "compute": ComputeProfile, + "database_parameter": DatabaseParameterProfile, + } + + if not profile_type: + profile_type, err = get_profile_type(module) + if err: + return None, err + + if profile_type in profiles: + return profiles[profile_type](module), None + else: + return None, "Profile type {0} is not supported".format(profile_type) diff --git a/plugins/module_utils/ndb/profiles/profiles.py b/plugins/module_utils/ndb/profiles/profiles.py new file mode 100644 index 000000000..8f0166560 --- /dev/null +++ b/plugins/module_utils/ndb/profiles/profiles.py @@ -0,0 +1,223 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +from copy import deepcopy + +__metaclass__ = type + + +from ..nutanix_database import NutanixDatabase + + +class Profile(NutanixDatabase): + types = ["Database_Parameter", "Compute", "Network", "Software"] + _type = None + + def __init__(self, module): + resource_type = "/profiles" + super(Profile, self).__init__(module, resource_type=resource_type) + self.build_spec_methods = { + "name": self.build_spec_name, + "desc": self.build_spec_desc, + "database_type": self.build_spec_database_type, + } + + def get_type(self): + return self._type + 
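+ # usage sketch: get_profile_uuid({"name": "postgres-compute"}, type="Compute") resolves the uuid by a name lookup; the profile name here is illustrative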
+ def get_profile_uuid(self, data, type=None): + uuid = "" + if data.get("name"): + if not type: + type = self._type + if type not in self.types: + return None, "{0} is not a valid type. Allowed types are {1}".format( + type, self.types + ) + + query = {"type": type, "name": data.get("name")} + resp = self.read(query=query) + uuid = resp.get("id") + elif data.get("uuid"): + uuid = data.get("uuid") + else: + error = "Profile config {0} doesn't have name or uuid key".format(data) + return None, error + + return uuid, None + + def read( + self, + uuid=None, + endpoint=None, + query=None, + raise_error=True, + no_response=False, + timeout=30, + ): + if uuid: + if not query: + query = {} + query["id"] = uuid + return super().read( + uuid=None, + endpoint=endpoint, + query=query, + raise_error=raise_error, + no_response=no_response, + timeout=timeout, + ) + + def create_version(self, profile_uuid, data): + endpoint = "versions" + resp = self.update( + uuid=profile_uuid, endpoint=endpoint, data=data, method="POST" + ) + return resp + + def delete_version(self, profile_uuid, version_uuid): + endpoint = "versions/{0}".format(version_uuid) + resp = self.delete(uuid=profile_uuid, endpoint=endpoint, data={}) + return resp + + def update_version(self, profile_uuid, version_uuid, data): + endpoint = "versions/{0}".format(version_uuid) + resp = self.update(uuid=profile_uuid, endpoint=endpoint, data=data) + return resp + + def get_profile_by_version(self, uuid, version_uuid="latest"): + endpoint = "{0}/versions/{1}".format(uuid, version_uuid) + resp = self.read(endpoint=endpoint) + return resp + + def get_profiles(self, uuid=None, name=None, type=None): + + if not type: + type = self._type + + query = {} + if name: + query["name"] = name + else: + query["id"] = uuid + + if type: + query["type"] = type + + resp = self.read(query=query) + return resp + + def get_all_name_uuid_map(self): + query = {} + if self._type: + query["type"] = self._type + + name_uuid_map = {} + resp = self.read(query=query) + if not isinstance(resp, list): + return None, "Invalid response type obtained from NDB server" + + for entity in resp: + name_uuid_map[entity["name"]] = entity["id"] + + return name_uuid_map, None + + def _get_default_spec(self): + return deepcopy( + { + "type": "", + "systemProfile": False, + "properties": [], + "name": "", + "description": "", + } + ) + + def get_default_update_spec(self, override_spec=None): + spec = {"name": "", "description": ""} + for key in spec: + if key in override_spec: + spec[key] = override_spec[key] + + return spec + + def get_default_version_update_spec(self, override_spec=None): + spec = { + "name": "", + "description": "", + "published": None, + "properties": [], + "propertiesMap": {}, + } + + for key in spec: + if key in override_spec: + spec[key] = override_spec[key] + + return spec + + def get_spec(self, old_spec=None, params=None, **kwargs): + if kwargs.get("version"): + if kwargs.get("create"): + return self.get_create_version_spec(old_spec, params, **kwargs) + elif kwargs.get("update"): + return self.get_update_version_spec(old_spec, params, **kwargs) + elif kwargs.get("delete"): + return self.get_delete_version_spec(old_spec, params, **kwargs) + elif kwargs.get("create"): + return self.get_create_profile_spec(old_spec, params, **kwargs) + elif kwargs.get("update"): + return self.get_update_profile_spec(old_spec, params, **kwargs) + else: + return super().get_spec(old_spec, params, **kwargs) + + def get_create_profile_spec(self, old_spec=None, params=None, **kwargs): + payload, err = super().get_spec(old_spec=old_spec, params=params, **kwargs) + if err: + return None, err + + payload["type"] = self._type + return payload, None + + def get_update_profile_spec(self, 
old_spec=None, params=None, **kwargs): + return super().get_spec(old_spec=old_spec, params=params, **kwargs) + + def get_create_version_spec(self, old_spec=None, params=None, **kwargs): + """ + Implement this method to support profile version create + """ + return old_spec, None + + def get_update_version_spec(self, old_spec=None, params=None, **kwargs): + """ + Implement this method to support profile version update + """ + return old_spec, None + + def get_delete_version_spec(self, old_spec=None, params=None, **kwargs): + """ + Implement this method to support profile version delete + """ + return old_spec, None + + def get_property_spec(self, name, value): + return deepcopy({"name": name, "value": value}) + + # common builders + def build_spec_name(self, payload, name): + payload["name"] = name + return payload, None + + def build_spec_desc(self, payload, desc): + payload["description"] = desc + return payload, None + + def build_spec_database_type(self, payload, type): + if self._type != "compute": + payload["engineType"] = type + "_database" + return payload, None + + def build_spec_status(self, payload, params): + if params.get("publish") is not None: + payload["published"] = params.get("publish") + return payload, None diff --git a/plugins/module_utils/ndb/slas.py b/plugins/module_utils/ndb/slas.py index c02ceb164..8176406fc 100644 --- a/plugins/module_utils/ndb/slas.py +++ b/plugins/module_utils/ndb/slas.py @@ -2,6 +2,8 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function +from copy import deepcopy + __metaclass__ = type @@ -12,6 +14,11 @@ class SLA(NutanixDatabase): def __init__(self, module): resource_type = "/slas" super(SLA, self).__init__(module, resource_type=resource_type) + self.build_spec_methods = { + "name": self._build_spec_name, + "desc": self._build_spec_desc, + "frequency": self._build_spec_frequency, + } def get_uuid( self, @@ -23,7 +30,7 @@ def get_uuid( no_response=False, ): endpoint = "{0}/{1}".format(key, value) - resp = self.read(uuid=None, endpoint=endpoint) + resp = self.read(uuid=None, endpoint=endpoint, raise_error=raise_error) return resp.get("id") def get_sla(self, uuid=None, name=None): @@ -37,6 +44,55 @@ def get_sla(self, uuid=None, name=None): return None, "Please provide either uuid or name for fetching sla details" return resp, None + def _get_default_spec(self): + return deepcopy( + { + "name": None, + "continuousRetention": 0, + "dailyRetention": 0, + "weeklyRetention": 0, + "monthlyRetention": 0, + "quarterlyRetention": 0, + } + ) + + def get_default_update_spec(self): + spec = self._get_default_spec() + spec["description"] = None + return spec + + def _build_spec_name(self, payload, name): + payload["name"] = name + return payload, None + + def _build_spec_desc(self, payload, desc): + payload["description"] = desc + return payload, None + + def _build_spec_frequency(self, payload, frequency): + + if frequency.get("logs_retention"): + payload["continuousRetention"] = frequency.get("logs_retention") + + if frequency.get("snapshots_retention"): + + # map of module attributes to api attributes + snapshots_retention_attr_map = { + "daily": "dailyRetention", + "weekly": "weeklyRetention", + "monthly": "monthlyRetention", + "quarterly": "quarterlyRetention", + } + + snapshots_retention = frequency["snapshots_retention"] + + # if input given in module then add in api payload + for attr, api_attr in snapshots_retention_attr_map.items(): + if 
snapshots_retention.get(attr) is not None: + payload[api_attr] = snapshots_retention[attr] + + return payload, None + # helper functions @@ -50,5 +106,5 @@ def get_sla_uuid(module, config): uuid = config["uuid"] else: error = "sla config {0} doesn't have name or uuid key".format(config) - return error, None + return None, error return uuid, None diff --git a/plugins/module_utils/ndb/snapshots.py b/plugins/module_utils/ndb/snapshots.py new file mode 100644 index 000000000..34de5719c --- /dev/null +++ b/plugins/module_utils/ndb/snapshots.py @@ -0,0 +1,196 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +from copy import deepcopy + +__metaclass__ = type + + +from .clusters import Cluster +from .nutanix_database import NutanixDatabase +from .time_machines import TimeMachine + + +class Snapshot(NutanixDatabase): + def __init__(self, module): + resource_type = "/snapshots" + super(Snapshot, self).__init__(module, resource_type=resource_type) + self.build_spec_methods = { + "name": self._build_spec_name, + "expiry_days": self._build_spec_expiry, + "clusters": self._build_spec_clusters, + } + + def create_snapshot(self, time_machine_uuid, data): + endpoint = "{0}/{1}".format(time_machine_uuid, "snapshots") + time_machine = TimeMachine(self.module) + return time_machine.create(data=data, endpoint=endpoint) + + def rename_snapshot(self, uuid, data): + endpoint = "i/{0}".format(uuid) + return self.update(data=data, endpoint=endpoint, method="PATCH") + + def update_expiry(self, uuid, data): + query = {"set-lcm-config": True} + endpoint = "i/{0}".format(uuid) + return self.update(data=data, endpoint=endpoint, query=query) + + def remove_expiry(self, uuid, data): + endpoint = "i/{0}".format(uuid) + query = {"unset-lcm-config": True} + return self.update(data=data, endpoint=endpoint, query=query) + + def replicate(self, uuid, time_machine_uuid, data): + endpoint = "{0}/{1}/{2}".format("snapshots", uuid, "replicate") + time_machine = TimeMachine(self.module) + return time_machine.update( + data=data, uuid=time_machine_uuid, endpoint=endpoint, method="POST" + ) + + def get_snapshot(self, time_machine_uuid, name): + snapshot_uuid, err = self.get_snapshot_uuid(time_machine_uuid, name) + if err: + return None, err + return ( + self.read(snapshot_uuid, query={"load-replicated-child-snapshots": True}), + None, + ) + + def get_snapshot_uuid(self, time_machine_uuid, name): + query = { + "value-type": "time-machine", + "value": time_machine_uuid, + "detailed": True, + "all": True, + } + + snapshots = self.read(query=query) + uuid = "" + if isinstance(snapshots, list): + + # multiple snapshots can have same name + # check for latest snapshot with given name using latest timestamp + latest_timestamp = 0 + for snapshot in snapshots: + if ( + snapshot.get("name") == name + and snapshot.get("snapshotTimeStampDate") > latest_timestamp + ): + uuid = snapshot.get("id") + latest_timestamp = snapshot.get("snapshotTimeStampDate") + + if not uuid: + return None, "Snapshot with name {0} not found".format(name) + + return uuid, None + + def _get_default_spec(self): + return deepcopy({"name": ""}) + + def _get_default_snapshot_replication_spec(self): + return deepcopy({"nxClusterIds": []}) + + def get_expiry_update_spec(self, config): + expiry = config.get("expiry_days") + spec = { + "lcmConfig": { + "expiryDetails": { + "expireInDays": expiry, + } + } + } + return spec + + def 
get_rename_snapshot_spec(self, name): + spec = self._get_default_spec() + spec["name"] = name + spec["resetName"] = True + return spec + + def get_remove_expiry_spec(self, uuid, name): + spec = {"id": uuid, "name": name} + return spec + + def get_replicate_snapshot_spec(self): + payload = self._get_default_snapshot_replication_spec() + self.build_spec_methods = { + "clusters": self._build_spec_clusters, + "expiry_days": self._build_spec_expiry, + } + payload, err = self.get_spec(old_spec=payload) + if err: + return None, err + payload["nxClusterIds"] = payload.pop("replicateToClusterIds") + return payload, None + + def _build_spec_name(self, payload, name): + payload["name"] = name + return payload, None + + def _build_spec_expiry(self, payload, expiry): + payload["lcmConfig"] = { + "snapshotLCMConfig": { + "expiryDetails": { + "expireInDays": int(expiry), + } + } + } + return payload, None + + def _build_spec_clusters(self, payload, clusters): + cluster_uuids, err = self.resolve_cluster_uuids(clusters) + if err: + return None, err + payload["replicateToClusterIds"] = cluster_uuids + return payload, None + + def resolve_cluster_uuids(self, clusters): + _clusters = Cluster(self.module) + specs = [] + + # if there is more than one cluster, fetch all of them once to resolve uuids + clusters_name_uuid_map = {} + if len(clusters) > 1: + clusters_name_uuid_map = _clusters.get_all_clusters_name_uuid_map() + + for cluster in clusters: + uuid = "" + if cluster.get("name"): + + uuid = "" + if clusters_name_uuid_map: + uuid = clusters_name_uuid_map.get(cluster.get("name")) + else: + uuid = _clusters.get_uuid(value=cluster.get("name")) + + if not uuid: + return None, "Cluster with name {0} not found".format( + cluster["name"] + ) + else: + uuid = cluster.get("uuid") + + specs.append(uuid) + + return specs, None + + def _build_query_params(self, query_params): + if query_params.get("database-ids"): + db_ids = ",".join(query_params["database-ids"]) + query_params["database-ids"] = db_ids + return query_params + + def get_snapshots(self, query_params=None): + if query_params: + queries = self._build_query_params(query_params) + resp = self.read(query=queries) + else: + resp = self.read() + + return resp + + def get_snapshot_files(self, uuid): + endpoint = "files" + resp = self.read(uuid=uuid, endpoint=endpoint) + return resp diff --git a/plugins/module_utils/ndb/stretched_vlans.py b/plugins/module_utils/ndb/stretched_vlans.py new file mode 100644 index 000000000..ab5cdaa94 --- /dev/null +++ b/plugins/module_utils/ndb/stretched_vlans.py @@ -0,0 +1,89 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +from copy import deepcopy + +from .vlans import VLAN + +__metaclass__ = type + + +class StretchedVLAN(VLAN): + def __init__(self, module): + super(StretchedVLAN, self).__init__(module) + self.build_spec_methods = { + "name": self._build_spec_name, + "desc": self._build_spec_desc, + "gateway": self._build_spec_gateway, + "subnet_mask": self._build_spec_subnet_mask, + "vlans": self._build_spec_vlans, + } + + def get_spec(self, old_spec=None, params=None): + return super().get_spec(old_spec=old_spec, params=params) + + def get_stretched_vlan(self, uuid=None): + if uuid: + endpoint = "stretched-vlan/{0}".format(uuid) + resp = self.read(endpoint=endpoint) + if not resp: + return None, "stretched vlan with uuid {0} not found".format(uuid) + else: + return ( + None, + "Please 
provide uuid for fetching stretched vlan details", + ) + return resp, None + + def _get_default_spec(self): + return deepcopy({"name": "", "type": "Static", "vlanIds": []}) + + def get_default_update_spec(self, override_spec=None): + spec = deepcopy( + { + "name": "", + "type": "Static", + "metadata": {"gateway": "", "subnetMask": ""}, + "vlanIds": [], + } + ) + if override_spec: + for key in spec.keys(): + if override_spec.get(key): + spec[key] = deepcopy(override_spec[key]) + spec["vlanIds"] = [vlan["id"] for vlan in override_spec.get("vlans")] + return spec + + def _build_spec_name(self, payload, name): + payload["name"] = name + return payload, None + + def _build_spec_desc(self, payload, value): + payload["description"] = value + return payload, None + + def _build_spec_gateway(self, payload, gateway): + payload["metadata"]["gateway"] = gateway + return payload, None + + def _build_spec_subnet_mask(self, payload, subnet_mask): + payload["metadata"]["subnetMask"] = subnet_mask + return payload, None + + def _build_spec_vlans(self, payload, value): + payload["vlanIds"] = value + payload["type"] = "Static" + return payload, None + + def create_stretched_vlan(self, data): + endpoint = "stretched-vlan" + return self.create(data=data, endpoint=endpoint) + + def update_stretched_vlan(self, data, uuid): + endpoint = "stretched-vlan/{0}".format(uuid) + return self.update(data=data, endpoint=endpoint) + + def delete_stretched_vlan(self, uuid): + endpoint = "stretched-vlan/{0}".format(uuid) + return self.delete(endpoint=endpoint) diff --git a/plugins/module_utils/ndb/tags.py b/plugins/module_utils/ndb/tags.py index 295e41b3d..d650b69f8 100644 --- a/plugins/module_utils/ndb/tags.py +++ b/plugins/module_utils/ndb/tags.py @@ -2,6 +2,8 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function +from copy import deepcopy + __metaclass__ = type @@ -14,10 +16,131 @@ class Tag(NutanixDatabase): def __init__(self, module): resource_type = "/tags" super(Tag, self).__init__(module, resource_type=resource_type) + self.build_spec_methods = { + "name": self._build_spec_name, + "desc": self._build_spec_desc, + "entity_type": self._build_spec_entity_type, + "tag_value_required": self._build_spec_tag_value_required, + "status": self._build_spec_status, + } + + def read( + self, + uuid=None, + endpoint=None, + query=None, + raise_error=True, + no_response=False, + timeout=30, + ): + + if uuid: + if not query: + query = {} + query["id"] = uuid + + return super().read( + uuid=None, + query=query, + endpoint=endpoint, + raise_error=raise_error, + no_response=no_response, + timeout=timeout, + ) + + def get_tag_uuid(self, name, entity_type): + # use name + entity_type combination to get tag details + query = {} + if entity_type: + query["entityType"] = entity_type + + resp = self.read(query=query) - def get_all_name_uuid_map(self): - resp = self.read() + uuid = None + if isinstance(resp, list): + for tag in resp: + if tag.get("name") == name and tag.get("status") == "ENABLED": + uuid = tag.get("id") + + else: + return None, "Invalid API response" + + if not uuid: + return None, "Tag with name {0} not found".format(name) + + return uuid, None + + def get_all_name_uuid_map(self, type=None): + query = {} + if type: + query = {"entityType": type} + resp = self.read(query=query) name_uuid_map = {} for tag in resp: name_uuid_map[tag["name"]] = tag["id"] return name_uuid_map + 
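+ # e.g. tags={"env": "prod"} (illustrative) resolves through get_all_name_uuid_map into [{"tagId": "<uuid>", "tagName": "env", "value": "prod"}] when associating tags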
+ def get_spec(self, old_spec=None, params=None, **kwargs): + + if kwargs.get("associate_to_entity"): + return self.get_spec_for_tags_association( + old_spec=old_spec, params=params, **kwargs + ) + else: + return super().get_spec(old_spec=old_spec, params=params, **kwargs) + + def get_default_update_spec(self): + return deepcopy( + { + "id": "", + "name": "", + "description": "", + "owner": "", + "required": False, + "status": "", + "entityType": "", + } + ) + + def _get_default_spec(self): + return deepcopy( + {"entityType": "", "name": "", "required": False, "description": ""} + ) + + def get_spec_for_tags_association(self, old_spec=None, params=None, **kwargs): + tags = params or self.module.params.get("tags") + + payload = deepcopy(old_spec) + if not tags: + payload["tags"] = [] + return payload, None + + name_uuid_map = self.get_all_name_uuid_map(type=kwargs.get("type")) + specs = [] + for name, val in tags.items(): + if name not in name_uuid_map: + return None, "Tag with name {0} not found".format(name) + spec = {"tagId": name_uuid_map[name], "tagName": name, "value": val} + specs.append(spec) + payload["tags"] = specs + return payload, None + + def _build_spec_name(self, payload, name): + payload["name"] = name + return payload, None + + def _build_spec_desc(self, payload, desc): + payload["description"] = desc + return payload, None + + def _build_spec_entity_type(self, payload, entity_type): + payload["entityType"] = entity_type + return payload, None + + def _build_spec_tag_value_required(self, payload, tag_value_required): + payload["required"] = tag_value_required + return payload, None + + def _build_spec_status(self, payload, status): + payload["status"] = status + return payload, None diff --git a/plugins/module_utils/ndb/time_machines.py b/plugins/module_utils/ndb/time_machines.py index 1ad0bc8ca..dc54b0dc0 100644 --- a/plugins/module_utils/ndb/time_machines.py +++ b/plugins/module_utils/ndb/time_machines.py @@ -2,23 +2,43 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function -__metaclass__ = type - +from copy import deepcopy +from .clusters import Cluster, get_cluster_uuid from .nutanix_database import NutanixDatabase +from .slas import get_sla_uuid + +__metaclass__ = type class TimeMachine(NutanixDatabase): def __init__(self, module): resource_type = "/tms" super(TimeMachine, self).__init__(module, resource_type=resource_type) + self.build_spec_methods = { + "name": self._build_spec_name, + "desc": self._build_spec_desc, + "schedule": self._build_spec_schedule, + "auto_tune_log_drive": self._build_spec_auto_tune_log_drive, + } + + def log_catchup(self, time_machine_uuid, data): + endpoint = "{0}/{1}".format(time_machine_uuid, "log-catchups") + return self.create(data=data, endpoint=endpoint) - def get_time_machine(self, uuid=None, name=None): + def get_time_machine(self, uuid=None, name=None, query=None): + """ + Fetch time machine info based on uuid or name. 
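+ Returns a (response, error) tuple where error is None on success. 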
+ Args: + uuid(str): uuid of time machine + name(str): name of time machine + query(dict): query params + """ if uuid: - resp = self.read(uuid=uuid) + resp = self.read(uuid=uuid, query=query) elif name: endpoint = "{0}/{1}".format("name", name) - resp = self.read(endpoint=endpoint) + resp = self.read(endpoint=endpoint, query=query) if isinstance(resp, list): if not resp: return None, "Time machine with name {0} not found".format(name) @@ -31,13 +51,296 @@ def get_time_machine(self, uuid=None, name=None): if not tm: return None, "Time machine with name {0} not found".format(name) resp = tm + return resp, None + + def authorize_db_server_vms(self, uuid, data): + endpoint = "dbservers" + return self.update(data=data, uuid=uuid, endpoint=endpoint, method="POST") + + def deauthorize_db_server_vms(self, uuid, data): + endpoint = "dbservers" + return self.delete(data=data, uuid=uuid, endpoint=endpoint) + + def get_authorized_db_server_vms(self, uuid, query=None): + endpoint = "candidate-dbservers" + return self.read(uuid=uuid, endpoint=endpoint, query=query) - # fetch all details using uuid - if resp.get("id"): - resp = self.read(uuid=resp["id"]) + def get_time_machines(self, value=None, key="uuid", endpoint=None, query=None): + + if value: + endpoint = value + if not query: + query = {} + + query["value-type"] = key + + return self.read(endpoint=endpoint, query=query) + + def get_time_machine_uuid(self, config): + uuid = "" + if config.get("uuid"): + uuid = config["uuid"] + elif config.get("name"): + name = config["name"] + tm, err = self.get_time_machine(name=name) + if err: + return None, err + uuid = tm.get("id") else: - return ( - None, - "Please provide either uuid or name for fetching time machine details", + error = "time machine config {0} doesn't have name or uuid key".format( + config ) - return resp, None + return None, error + + return uuid, None + + @staticmethod + def format_response(response): + """This method formats time machine related responses. 
It removes attributes that are not required.""" + attrs = ["metadata", "ownerId", "accessLevel", "category"] + for attr in attrs: + if attr in response: + response.pop(attr) + return response + + def get_log_catchup_spec(self, for_restore=False): + return deepcopy( + { + "forRestore": for_restore, + "actionArguments": [ + {"name": "preRestoreLogCatchup", "value": for_restore}, + {"name": "switch_log", "value": True}, + ], + } + ) + + def get_authorized_db_server_vm_uuid(self, time_machine_uuid, config): + uuid = "" + if config.get("name"): + resp = self.get_authorized_db_server_vms( + uuid=time_machine_uuid, query={"usable": True} + ) + for vm in resp: + if vm.get("name") == config.get("name"): + uuid = vm.get("id") + + if not uuid: + return None, "Authorized db server vm with name {0} not found".format( + config.get("name") + ) + + elif config.get("uuid"): + uuid = config["uuid"] + + else: + error = "Authorized db server vm config {0} doesn't have name or uuid key".format( + config + ) + return None, error + + return uuid, None + + def _get_default_spec(self): + return deepcopy( + { + "name": "", + "description": "", + "schedule": {}, + "autoTuneLogDrive": True, + } + ) + + def get_spec(self, old_spec, params=None, **kwargs): + + if not params: + if self.module.params.get("time_machine"): + params = self.module.params.get("time_machine") + else: + return None, "'time_machine' is required for creating time machine spec" + + time_machine_spec, err = super().get_spec(params=params) + if err: + return None, err + + # set sla spec + sla_uuid, err = get_sla_uuid(self.module, params["sla"]) + if err: + return None, err + + # set destination clusters in case of HA instance + if params.get("clusters"): + cluster_uuids = [] + + # fetch all clusters name uuid map + _cluster = Cluster(self.module) + clusters_name_uuid_map = _cluster.get_all_clusters_name_uuid_map() + + for cluster in params.get("clusters"): + cluster_uuid = "" + if cluster.get("name"): + if clusters_name_uuid_map.get(cluster["name"]): + cluster_uuid = clusters_name_uuid_map[cluster["name"]] + else: + return None, "NDB cluster with name '{0}' not found".format( + cluster["name"] + ) + + elif cluster.get("uuid"): + cluster_uuid = cluster["uuid"] + + cluster_uuids.append(cluster_uuid) + + time_machine_spec["slaDetails"] = { + "primarySla": {"slaId": sla_uuid, "nxClusterIds": cluster_uuids} + } + else: + time_machine_spec["slaId"] = sla_uuid + + old_spec["timeMachineInfo"] = time_machine_spec + return old_spec, None + + def get_authorize_db_server_vms_spec(self): + from .db_server_vm import DBServerVM + + _db_server_vms = DBServerVM(self.module) + db_server_vms = self.module.params.get("db_server_vms") + + uuids, err = _db_server_vms.resolve_uuids_from_entity_specs(vms=db_server_vms) + payload = uuids + return payload, err + + def _build_spec_name(self, payload, name): + payload["name"] = name + return payload, None + + def _build_spec_desc(self, payload, desc): + payload["description"] = desc + return payload, None + + def _build_spec_auto_tune_log_drive(self, payload, auto_tune): + payload["autoTuneLogDrive"] = auto_tune + return payload, None + + def _build_spec_schedule(self, payload, schedule): + schedule_spec = {} + if schedule.get("daily"): + + time = schedule["daily"].split(":") + if len(time) != 3: + return None, "Daily snapshot schedule not in HH:MM:SS format." 
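+ # e.g. daily="03:30:00" (illustrative) parses to hours=3, minutes=30, seconds=0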
+ + schedule_spec["snapshotTimeOfDay"] = { + "hours": int(time[0]), + "minutes": int(time[1]), + "seconds": int(time[2]), + } + + if schedule.get("weekly"): + schedule_spec["weeklySchedule"] = { + "enabled": True, + "dayOfWeek": schedule["weekly"], + } + + if schedule.get("monthly"): + schedule_spec["monthlySchedule"] = { + "enabled": True, + "dayOfMonth": schedule["monthly"], + } + + # set quarterly and yearly schedules as they depend on the monthly value + if schedule.get("quaterly"): + schedule_spec["quartelySchedule"] = { + "enabled": True, + "startMonth": schedule["quaterly"], + "dayOfMonth": schedule.get("monthly"), + } + + if schedule.get("yearly"): + schedule_spec["yearlySchedule"] = { + "enabled": True, + "month": schedule["yearly"], + "dayOfMonth": schedule.get("monthly"), + } + + if schedule.get("log_catchup") or schedule.get("snapshots_per_day"): + schedule_spec["continuousSchedule"] = { + "enabled": True, + "logBackupInterval": schedule.get("log_catchup"), + "snapshotsPerDay": schedule.get("snapshots_per_day"), + } + + payload["schedule"] = schedule_spec + return payload, None + + def get_default_data_access_management_spec(self, override_spec=None): + spec = deepcopy({"nxClusterId": "", "type": "OTHER", "slaId": ""}) + if override_spec: + for key in spec.keys(): + if override_spec.get(key): + spec[key] = deepcopy(override_spec[key]) + + return spec + + def get_data_access_management_spec(self, old_spec=None): + self.build_spec_methods = { + "cluster": self._build_spec_cluster, + "type": self._build_spec_type, + "sla": self._build_spec_sla, + } + spec = old_spec or self.get_default_data_access_management_spec() + return super().get_spec(old_spec=spec) + + def _build_spec_cluster(self, payload, param): + uuid, err = get_cluster_uuid(self.module, param) + if err: + return None, err + payload["nxClusterId"] = uuid + return payload, None + + def _build_spec_type(self, payload, type): + payload["type"] = type + return payload, None + + def _build_spec_sla(self, payload, param): + uuid, err = get_sla_uuid(self.module, param) + if err: + return None, err + if payload.get("slaId"): + payload["resetSlaId"] = True + payload["slaId"] = uuid + return payload, None + + def check_if_cluster_exists(self, time_machine_uuid, cluster_uuid): + """ + This method checks whether a cluster is associated with the time machine + """ + query = { + "load-associated-clusters": True, + } + resp = self.read(uuid=time_machine_uuid, query=query) + + for cluster in resp.get("associatedClusters", []): + if cluster.get("nxClusterId") == cluster_uuid: + return True + + return False + + def read_data_access_instance(self, time_machine_uuid, cluster_uuid): + endpoint = "clusters/{0}".format(cluster_uuid) + query = {"detailed": True} + return self.read(uuid=time_machine_uuid, endpoint=endpoint, query=query) + + def create_data_access_instance(self, uuid=None, data=None): + return self.update(uuid=uuid, data=data, endpoint="clusters", method="POST") + + def update_data_access_instance(self, tm_uuid=None, cluster_uuid=None, data=None): + endpoint = "clusters/{0}".format(cluster_uuid) + return self.update(uuid=tm_uuid, data=data, endpoint=endpoint, method="PATCH") + + def delete_data_access_instance(self, tm_uuid=None, cluster_uuid=None): + endpoint = "clusters/{0}".format(cluster_uuid) + data = { + "deleteReplicatedSnapshots": True, + "deleteReplicatedProtectionDomains": True, + } + return self.delete(uuid=tm_uuid, data=data, endpoint=endpoint) diff --git a/plugins/module_utils/ndb/vlans.py b/plugins/module_utils/ndb/vlans.py new file mode 
100644 index 000000000..bd0629caf --- /dev/null +++ b/plugins/module_utils/ndb/vlans.py @@ -0,0 +1,248 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +from copy import deepcopy + +from .clusters import get_cluster_uuid +from .nutanix_database import NutanixDatabase + +__metaclass__ = type + + +class VLAN(NutanixDatabase): + def __init__(self, module): + resource_type = "/resources/networks" + super(VLAN, self).__init__(module, resource_type=resource_type) + self.build_spec_methods = { + "name": self._build_spec_name, + "desc": self._build_spec_desc, + "vlan_type": self._build_spec_type, + "ip_pools": self._build_spec_ip_pools, + "gateway": self._build_spec_gateway, + "subnet_mask": self._build_spec_subnet_mask, + "primary_dns": self._build_spec_primary_dns, + "secondary_dns": self._build_spec_secondary_dns, + "dns_domain": self._build_spec_dns_domain, + "cluster": self._build_spec_cluster, + } + + def get_spec(self, old_spec=None, params=None, **kwargs): + if kwargs.get("validate_module_params"): + err = self._validate_module_params(old_spec) + if err: + return None, err + return super().get_spec(old_spec=old_spec, params=params, **kwargs) + + def get_uuid( + self, + value, + key="name", + data=None, + entity_type=None, + raise_error=True, + no_response=False, + ): + query = {key: value} + resp = self.read(query=query, raise_error=False) + if resp is None: + return None, "vlan with name {0} not found.".format(value) + uuid = resp.get("id") + return uuid, None + + def get_vlan(self, name=None, uuid=None, detailed=True): + default_query = {"detailed": detailed} + if uuid: + query = {"id": uuid} + query.update(deepcopy(default_query)) + resp = self.read(query=query) + if not resp: + return None, "vlan with uuid {0} not found".format(uuid) + elif name: + query = {"name": name} + query.update(deepcopy(default_query)) + resp = self.read(query=query) + if not resp: + return None, "vlan with name {0} not found".format(name) + else: + return ( + None, + "Please provide either uuid or name for fetching vlan details", + ) + + return resp, None + + def _get_default_spec(self): + return deepcopy( + { + "name": "", + "type": "", + "properties": [], + "ipPools": [], + "clusterId": "", + } + ) + + def get_default_update_spec(self, override_spec=None): + spec = deepcopy( + { + "name": "", + "type": "", + "properties": [], + "clusterId": "", + } + ) + if override_spec: + for key in spec.keys(): + if override_spec.get(key): + spec[key] = deepcopy(override_spec[key]) + + return spec + + def _build_spec_name(self, payload, name): + payload["name"] = name + return payload, None + + def _build_spec_desc(self, payload, value): + payload["description"] = value + return payload, None + + def _build_spec_type(self, payload, vlan_type): + payload["type"] = vlan_type + return payload, None + + def _build_spec_cluster(self, payload, param): + uuid, err = get_cluster_uuid(config=param, module=self.module) + if err: + return None, err + payload["clusterId"] = uuid + return payload, None + + def _build_spec_ip_pools(self, payload, ip_pools): + ip_pools_spec = [] + for ip_pool in ip_pools: + start_ip = ip_pool["start_ip"] + end_ip = ip_pool.get("end_ip") or ip_pool["start_ip"] + ip_pool = {"startIP": start_ip, "endIP": end_ip} + ip_pools_spec.append(ip_pool) + payload["ipPools"] = ip_pools_spec + return payload, None + + def _build_spec_remove_ip_pools(self, ip_pools): + 
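+        # Sketch of the expected transformation (hypothetical uuids): an input
+        # of ["pool-uuid-1", "pool-uuid-2"] is turned into
+        # {"ipPools": [{"id": "pool-uuid-1"}, {"id": "pool-uuid-2"}]},
+        # the payload that remove_ip_pools later sends to the ip-pool endpoint.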
payload = {"ipPools": []} + for ip_pool_uuid in ip_pools: + ip_pool = {"id": ip_pool_uuid} + payload["ipPools"].append(ip_pool) + return payload + + def _build_spec_gateway(self, payload, gateway): + old_property = self._get_property_by_name("VLAN_GATEWAY", payload["properties"]) + if old_property: + old_property["value"] = gateway + else: + payload["properties"].append({"name": "VLAN_GATEWAY", "value": gateway}) + return payload, None + + def _build_spec_subnet_mask(self, payload, subnet_mask): + old_property = self._get_property_by_name( + "VLAN_SUBNET_MASK", payload["properties"] + ) + if old_property: + old_property["value"] = subnet_mask + else: + payload["properties"].append( + {"name": "VLAN_SUBNET_MASK", "value": subnet_mask} + ) + return payload, None + + def _build_spec_primary_dns(self, payload, primary_dns): + old_property = self._get_property_by_name( + "VLAN_PRIMARY_DNS", payload["properties"] + ) + if old_property: + old_property["value"] = primary_dns + else: + payload["properties"].append( + {"name": "VLAN_PRIMARY_DNS", "value": primary_dns} + ) + return payload, None + + def _build_spec_secondary_dns(self, payload, secondary_dns): + old_property = self._get_property_by_name( + "VLAN_SECONDARY_DNS", payload["properties"] + ) + if old_property: + old_property["value"] = secondary_dns + else: + payload["properties"].append( + {"name": "VLAN_SECONDARY_DNS", "value": secondary_dns} + ) + return payload, None + + def _build_spec_dns_domain(self, payload, dns_domain): + old_property = self._get_property_by_name( + "VLAN_DNS_DOMAIN", payload["properties"] + ) + if old_property: + old_property["value"] = dns_domain + else: + payload["properties"].append( + {"name": "VLAN_DNS_DOMAIN", "value": dns_domain} + ) + return payload, None + + def _validate_module_params(self, payload=None): + updated_vlan_type = self.module.params.get("vlan_type") + old_vlan_type = payload.get("type") if payload else None + vlan_type = updated_vlan_type or old_vlan_type + if vlan_type == "DHCP": + for item in [ + "gateway", + "subnet_mask", + "primary_dns", + "secondary_dns", + "dns_domain", + "ip_pools", + ]: + if item in self.module.params: + err = "{0} cannot be provided if vlan_type is DHCP".format(item) + return err + if updated_vlan_type == "Static" and updated_vlan_type != old_vlan_type: + for item in ["gateway", "subnet_mask", "primary_dns"]: + if item not in self.module.params: + err = "{0} is required if vlan_type is Static".format(item) + return err + return None + + def _get_property_by_name(self, name, properties): + for property in properties: + if property["name"] == name: + return property + return None + + def add_ip_pools(self, vlan_uuid, ip_pools, old_spec=None): + vlan_type = self.module.params.get("vlan_type") or old_spec.get("type", None) + if vlan_type == "DHCP": + err = "ip_pools cannot be provided if vlan_type is DHCP" + return None, err + + spec, err = self._build_spec_ip_pools({}, ip_pools) + if err: + return None, err + endpoint = "ip-pool" + resp = self.update( + uuid=vlan_uuid, + data=spec, + endpoint=endpoint, + method="POST", + raise_error=False, + ) + if resp and resp.get("errorCode"): + err = resp.get("message") + return None, err + return resp, None + + def remove_ip_pools(self, vlan_uuid, ip_pools): + spec = self._build_spec_remove_ip_pools(ip_pools) + endpoint = "ip-pool" + return self.delete(uuid=vlan_uuid, data=spec, endpoint=endpoint) diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index 926a5c1e8..0871df63e 100644 --- 
a/plugins/module_utils/utils.py
+++ b/plugins/module_utils/utils.py
@@ -18,13 +18,18 @@ def remove_param_with_none_value(d):
             remove_param_with_none_value(e)


-def strip_extra_attrs(spec1, spec2):
+def strip_extra_attrs(spec1, spec2, deep=True):
+    """
+    This routine strips extra attributes from spec1 as per spec2.
+    If 'deep' is True then attributes are checked at all levels of the
+    dictionary, else only the first level of the dict is checked.
+    """
     for k, v in spec1.copy().items():
         if k not in spec2:
             spec1.pop(k)
-        elif isinstance(v, dict):
+        elif isinstance(v, dict) and deep:
             strip_extra_attrs(spec1[k], spec2[k])
-        elif isinstance(v, list) and v and isinstance(v[0], dict):
+        elif isinstance(v, list) and v and isinstance(v[0], dict) and deep:
             for i in range(len(v)):
                 try:
                     strip_extra_attrs(spec1[k][i], spec2[k][i])
@@ -92,3 +97,15 @@ def extract_uuids_from_references_list(reference_lists):
     for spec in reference_lists:
         uuids.add(spec["uuid"])
     return uuids
+
+
+def format_filters_map(filters, except_keys=None):
+    if filters:
+        mapped_filters = {}
+        for key, value in filters.items():
+            if value is not None:
+                if except_keys is None or key not in except_keys:
+                    key = key.replace("_", "-")
+                mapped_filters.update({key: value})
+        filters = mapped_filters
+    return filters
diff --git a/plugins/modules/ntnx_ndb_authorize_db_server_vms.py b/plugins/modules/ntnx_ndb_authorize_db_server_vms.py
new file mode 100644
index 000000000..17b47061a
--- /dev/null
+++ b/plugins/modules/ntnx_ndb_authorize_db_server_vms.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Prem Karat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+DOCUMENTATION = r"""
+---
+module: ntnx_ndb_authorize_db_server_vms
+short_description: module for authorizing db server vm
+version_added: 1.8.0
+description: module for authorizing db server vm with time machine
+options:
+  db_server_vms:
+    description:
+      - list of database server vms details
+    type: list
+    elements: dict
+    required: true
+    suboptions:
+      name:
+        description:
+          - name of database server vm
+          - mutually exclusive with C(uuid)
+        type: str
+      uuid:
+        description:
+          - uuid of database server vm
+          - mutually exclusive with C(name)
+        type: str
+  time_machine:
+    description:
+      - time machine details
+    type: dict
+    required: true
+    suboptions:
+      name:
+        description:
+          - name of time machine
+          - mutually exclusive with C(uuid)
+        type: str
+      uuid:
+        description:
+          - uuid of time machine
+          - mutually exclusive with C(name)
+        type: str
+extends_documentation_fragment:
+  - nutanix.ncp.ntnx_ndb_base_module
+  - nutanix.ncp.ntnx_operations
+author:
+  - Prem Karat (@premkarat)
+  - Pradeepsingh Bhati (@bhati-pradeep)
+  - Alaa Bishtawi (@alaa-bish)
+"""
+
+EXAMPLES = r"""
+- name: authorize db server vms
+  ntnx_ndb_authorize_db_server_vms:
+    time_machine:
+      name: "{{tm1}}"
+    db_server_vms:
+      - name: "{{vm1_name}}"
+  register: result
+
+- name: deauthorize db server vms
+  ntnx_ndb_authorize_db_server_vms:
+    state: absent
+    time_machine:
+      name: "{{tm1}}"
+    db_server_vms:
+      - name: "{{vm1_name}}"
+  register: result
+"""
+RETURN = r"""
+response:
+  description: An intentful representation of the authorization status
+  returned: always
+  type: dict
+  sample: {
+    "errorCode": 0,
+    "info": null,
+    "message": "The DBServer(s) [5c14b4d4-553f-4b93-a3c4-a6685da2732b]
+    got successfully associated with the Time Machine
(id:7a39664b-dfb7-4529-887c-6d91f7e18604, name:test-setup-dnd_TM)", + "status": "success" + } +uuid: + description: Time machine uuid + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" +""" + + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.time_machines import TimeMachine # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + + +def get_module_spec(): + mutually_exclusive = [("name", "uuid")] + entity_by_spec = dict(name=dict(type="str"), uuid=dict(type="str")) + + module_args = dict( + db_server_vms=dict( + type="list", + elements="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=True, + ), + time_machine=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=True, + ), + ) + return module_args + + +def authorize_db_server_vms(module, result): + time_machine = TimeMachine(module) + if not module.params.get("time_machine"): + module.fail_json( + msg="'time_machine' is required for authorizing db server vms with time machine" + ) + + time_machine_uuid, err = time_machine.get_time_machine_uuid( + module.params.get("time_machine") + ) + if err: + result["response"] = err + module.fail_json(msg="Failed fetching time machine uuid", **result) + + spec, err = time_machine.get_authorize_db_server_vms_spec() + if err: + result["response"] = err + module.fail_json(msg="Failed getting authorizing db server vm spec", **result) + + if module.check_mode: + result["response"] = spec + return + + resp = time_machine.authorize_db_server_vms(uuid=time_machine_uuid, data=spec) + result["response"] = resp + result["uuid"] = time_machine_uuid + result["changed"] = True + + +def deauthorize_db_server_vms(module, result): + time_machine = TimeMachine(module) + if not module.params.get("time_machine"): + module.fail_json( + msg="'time_machine' is required for deauthorizing db server vms with time machine" + ) + + time_machine_uuid, err = time_machine.get_time_machine_uuid( + module.params.get("time_machine") + ) + if err: + result["response"] = err + module.fail_json(msg="Failed fetching time machine uuid", **result) + + spec, err = time_machine.get_authorize_db_server_vms_spec() + if err: + result["response"] = err + module.fail_json(msg="Failed getting deauthorizing db server vm spec", **result) + + if module.check_mode: + result["response"] = spec + return + + resp = time_machine.deauthorize_db_server_vms(uuid=time_machine_uuid, data=spec) + result["response"] = resp + result["uuid"] = time_machine_uuid + result["changed"] = True + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None, "uuid": None} + if module.params.get("state") == "present": + authorize_db_server_vms(module, result) + else: + deauthorize_db_server_vms(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_clones_info.py b/plugins/modules/ntnx_ndb_clones_info.py index d5aa4c83e..770173442 100644 --- a/plugins/modules/ntnx_ndb_clones_info.py +++ b/plugins/modules/ntnx_ndb_clones_info.py @@ -11,7 +11,7 @@ --- module: ntnx_ndb_clones_info short_description: info module for database clones -version_added: 1.8.0-beta.1 +version_added: 1.8.0 description: 'Get clone info' options: name: @@ 
-23,7 +23,7 @@ - clone id type: str extends_documentation_fragment: - - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_ndb_info_base_module author: - Prem Karat (@premkarat) - Gevorg Khachatryan (@Gevorg-Khachatryan-97) @@ -302,7 +302,7 @@ """ from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 -from ..module_utils.ndb.clones import Clone # noqa: E402 +from ..module_utils.ndb.database_clones import DatabaseClone # noqa: E402 def get_module_spec(): @@ -316,7 +316,7 @@ def get_module_spec(): def get_clone(module, result): - clone = Clone(module) + clone = DatabaseClone(module) if module.params.get("name"): name = module.params["name"] resp, err = clone.get_clone(name=name) @@ -332,7 +332,7 @@ def get_clone(module, result): def get_clones(module, result): - clone = Clone(module) + clone = DatabaseClone(module) resp = clone.read() diff --git a/plugins/modules/ntnx_ndb_clusters.py b/plugins/modules/ntnx_ndb_clusters.py new file mode 100644 index 000000000..aa8b5565f --- /dev/null +++ b/plugins/modules/ntnx_ndb_clusters.py @@ -0,0 +1,485 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_ndb_clusters +short_description: "Create, Update and Delete NDB clusters" +version_added: 1.8.0 +description: "Create, Update and Delete NDB clusters" +options: + name: + type: str + description: + - Name of the cluster. + - Update allowed. + uuid: + type: str + description: UUID of the cluster. + desc: + type: str + description: + - Description of the cluster. + - Update allowed. + name_prefix: + type: str + description: Name prefix of the cluster. + cluster_ip: + type: str + description: + - IP address of cluster. + - Update allowed. + cluster_credentials: + type: dict + description: + - Credentials of the cluster. + - Update allowed. + suboptions: + username: + type: str + description: Cluster username + password: + type: str + description: Cluster password + agent_network: + type: dict + description: configure dns and ntp details. + suboptions: + dns_servers: + type: list + description: dns servers for clusters + elements: str + ntp_servers: + type: list + elements: str + description: ntp servers for clusters + vlan_access: + type: dict + description: VLAN access info for which you want to configure network segmentation + suboptions: + prism_vlan: + type: dict + description: + - VLAN access info to configure a VLAN that the NDB agent VM can use to communicate with Prism + suboptions: + vlan_name: + type: str + description: Name of subnet + vlan_type: + type: str + description: type of subnet + choices: ["DHCP", "Static"] + static_ip: + type: str + description: ip address of subnet + gateway: + type: str + description: The gateway ip address + subnet_mask: + type: str + description: Subnet network address + dsip_vlan: + type: dict + description: VLAN access info to configure a VLAN that the agent VM can use to make connection requests to the iSCSI data services IP. 
+ suboptions: + vlan_name: + type: str + description: Name of subnet + vlan_type: + type: str + description: Type of subnet + choices: ["DHCP", "Static"] + static_ip: + type: str + description: ip address of subnet + gateway: + type: str + description: The gateway ip address + subnet_mask: + type: str + description: Subnet network address + dbserver_vlan: + type: dict + description: + - VLAN access info to configure a VLAN that is used for communications between + the NDB agent VM and the database server VM on the newly registered NDB server cluster. + suboptions: + vlan_name: + type: str + description: Name of subnet + vlan_type: + type: str + description: Type of subnet + choices: ["DHCP", "Static"] + static_ip: + type: str + description: ip address of subnet + gateway: + type: str + description: The gateway ip address + subnet_mask: + type: str + description: Subnet network address + storage_container: + type: str + description: Name of storage container. +extends_documentation_fragment: + - nutanix.ncp.ntnx_operations + - nutanix.ncp.ntnx_ndb_base_module +author: + - Prem Karat (@premkarat) + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" + - name: Register Cluster with prisim_vlan + ntnx_ndb_clusters: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + validate_certs: false + name: "cluster_name" + desc: "cluster_desc" + name_prefix: "cluster_name_prefix" + cluster_ip: "cluster_ip" + cluster_credentials: + username: "{{cluster_info.cluster_credentials.username}}" + password: "{{cluster_info.cluster_credentials.password}}" + agent_network: + dns_servers: + - "{{cluster_info.agent_network.dns_servers[0]}}" + - "{{cluster_info.agent_network.dns_servers[1]}}" + ntp_servers: + - "{{cluster_info.agent_network.ntp_servers[0]}}" + - "{{cluster_info.agent_network.ntp_servers[1]}}" + vlan_access: + prism_vlan: + vlan_name: "{{cluster_info.vlan_access.prism_vlan.vlan_name}}" + vlan_type: "{{cluster_info.vlan_access.prism_vlan.vlan_type}}" + static_ip: "{{cluster_info.vlan_access.prism_vlan.static_ip}}" + gateway: "{{cluster_info.vlan_access.prism_vlan.gateway}}" + subnet_mask: "{{cluster_info.vlan_access.prism_vlan.subnet_mask}}" + storage_container: "{{cluster_info.storage_container}}" + + - name: update cluster name , desc + ntnx_ndb_clusters: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + validate_certs: false + uuid: "cluster_uuid" + name: newname + desc: newdesc + + - name: delete cluster + ntnx_ndb_clusters: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + validate_certs: false + uuid: "cluster_uuid" + state: absent +""" + +RETURN = r""" +response: + description: An intentful representation of a cluster status + returned: always + type: dict + sample: + { + "cloudInfo": null, + "cloudType": "NTNX", + "dateCreated": "2023-01-22 08:59:42.12768", + "dateModified": "2023-01-22 09:12:07.842244", + "description": "newdesc", + "entityCounts": null, + "fqdns": null, + "healthy": true, + "hypervisorType": "AHV", + "hypervisorVersion": "6.1", + "id": "0000-0000-000-00000-0000", + "ipAddresses": [ + "10.46.33.223" + ], + "managementServerInfo": null, + "name": "newname", + "nxClusterUUID": "0000-0000-000-00000-0000", + "ownerId": "0000-0000-000-00000-0000", + "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "properties": [ + { + "description": null, + "name": "CLUSTER_ID", + "ref_id": "0000-0000-000-00000-0000", + "secure": false, + "value": "0000-0000-000-00000-0000", + }, + { + 
"description": null, + "name": "CLUSTER_INCARNATION_ID", + "ref_id": "0000-0000-000-00000-0000", + "secure": false, + "value": "1670238836188065" + }, + { + "description": null, + "name": "ERA_STORAGE_CONTAINER", + "ref_id": "0000-0000-000-00000-0000", + "secure": false, + "value": "default-container-95673204421578" + }, + { + "description": null, + "name": "MODEL_NAME", + "ref_id": "0000-0000-000-00000-0000", + "secure": false, + "value": "NX-1065-G5" + }, + { + "description": null, + "name": "ONDEMAND_REPLICATION_SUPPORTED", + "ref_id": "0000-0000-000-00000-0000", + "secure": false, + "value": "true" + }, + { + "description": null, + "name": "PRISM_VM_LIST_PAGINATION_LIMIT", + "ref_id": "0000-0000-000-00000-0000", + "secure": false, + "value": "500" + }, + { + "description": null, + "name": "PRISM_VM_LIST_PAGINATION_SIZE", + "ref_id": "0000-0000-000-00000-0000", + "secure": false, + "value": "50" + }, + { + "description": null, + "name": "RESOURCE_CONFIG", + "ref_id": "0000-0000-000-00000-0000", + "secure": false, + "value": "{\"storageThresholdPercentage\":95.0,\"memoryThresholdPercentage\":95.0}" + }, + { + "description": null, + "name": "TIMEZONE", + "ref_id": "0000-0000-000-00000-0000", + "secure": false, + "value": "UTC" + } + ], + "referenceCount": 0, + "resourceConfig": { + "memoryThresholdPercentage": 95.0, + "storageThresholdPercentage": 95.0 + }, + "status": "UP", + "uniqueName": "NEWNAME", + "username": "admin", + "version": "v2", + } +cluster_uuid: + description: The created cluster uuid + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" +""" + +import time # noqa: E402 + +from ..module_utils import utils # noqa: E402 +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.clusters import Cluster # noqa: E402 +from ..module_utils.ndb.operations import Operation # noqa: E402 + + +def get_module_spec(): + + credentials_spec = dict( + username=dict(type="str"), + password=dict(type="str", no_log=True), + ) + + agent_network_spec = dict( + dns_servers=dict(type="list", elements="str"), + ntp_servers=dict(type="list", elements="str"), + ) + + vlan_access_spec = dict( + vlan_name=dict(type="str"), + vlan_type=dict(type="str", choices=["DHCP", "Static"]), + static_ip=dict(type="str"), + gateway=dict(type="str"), + subnet_mask=dict(type="str"), + ) + + vlan_access_type_spec = dict( + prism_vlan=dict(type="dict", options=vlan_access_spec), + dsip_vlan=dict(type="dict", options=vlan_access_spec), + dbserver_vlan=dict(type="dict", options=vlan_access_spec), + ) + + module_args = dict( + name=dict(type="str"), + uuid=dict(type="str"), + desc=dict(type="str"), + name_prefix=dict(type="str"), + cluster_ip=dict(type="str"), + cluster_credentials=dict( + type="dict", + required_together=[("username", "password")], + options=credentials_spec, + ), + agent_network=dict(type="dict", options=agent_network_spec), + vlan_access=dict(type="dict", options=vlan_access_type_spec), + storage_container=dict(type="str"), + ) + + return module_args + + +def create_cluster(module, result): + cluster = Cluster(module) + cluster_ip = module.params.get("cluster_ip") + + spec, err = cluster.get_spec() + if err: + result["error"] = err + module.fail_json(msg="Failed generating create cluster spec", **result) + + if cluster.get_cluster_by_ip(cluster_ip): + module.fail_json( + msg="The provided cluster IP is already registered with NDB.", **result + ) + + if module.check_mode: + result["response"] = spec + return + + resp = cluster.create(spec) 
+ ops_uuid = resp["operationId"] + + cluster_name = module.params.get("name") + cluster_uuid = cluster.get_uuid(key="name", value=cluster_name) + + if not cluster_uuid: + result["error"] = err + module.fail_json(msg="Failed getting cluster uuid", **result) + + result["cluster_uuid"] = cluster_uuid + result["changed"] = True + + if module.params.get("wait"): + operations = Operation(module) + time.sleep(5) # wait for ops to starts + operations.wait_for_completion(ops_uuid) + resp = cluster.read(cluster_uuid) + + result["response"] = resp + + +def update_cluster(module, result): + cluster_uuid = module.params["uuid"] + + cluster = Cluster(module) + + resp = cluster.read(cluster_uuid) + old_spec = cluster.get_default_update_spec(override_spec=resp) + + update_spec, err = cluster.get_spec(old_spec=old_spec) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating update cluster spec", **result) + + result["cluster_uuid"] = cluster_uuid + + if module.check_mode: + result["response"] = update_spec + return + + if check_for_idempotency(old_spec, update_spec): + result["skipped"] = True + module.exit_json(msg="Nothing to change.") + + resp = cluster.update(data=update_spec, uuid=cluster_uuid) + result["changed"] = True + + result["response"] = resp + + +def delete_cluster(module, result): + cluster_uuid = module.params["uuid"] + + cluster = Cluster(module) + resp, err = cluster.delete(cluster_uuid) + if err: + result["error"] = err + module.fail_json(msg="Failed removing cluster", **result) + result["response"] = resp + + ops_uuid = resp["operationId"] + + if module.params.get("wait"): + operations = Operation(module) + time.sleep(2) # to get operation ID functional + resp = operations.wait_for_completion(ops_uuid, delay=5) + result["response"] = resp + result["changed"] = True + result["cluster_uuid"] = cluster_uuid + + +def check_for_idempotency(old_spec, update_spec): + if old_spec == update_spec: + return True + return False + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("name", "uuid"), True), + ("state", "present", ("cluster_ip", "uuid"), True), + ("state", "present", ("cluster_credentials", "uuid"), True), + ("state", "absent", ("uuid",)), + ], + mutually_exclusive=[ + ("uuid", "name_prefix"), + ("uuid", "agent_network"), + ("uuid", "vlan_access"), + ("uuid", "storage_container"), + ], + ) + utils.remove_param_with_none_value(module.params) + result = {"response": {}, "error": None, "changed": False} + state = module.params["state"] + if state == "present": + if module.params.get("uuid"): + update_cluster(module, result) + else: + create_cluster(module, result) + else: + delete_cluster(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_clusters_info.py b/plugins/modules/ntnx_ndb_clusters_info.py index cbde76e4b..d27527d67 100644 --- a/plugins/modules/ntnx_ndb_clusters_info.py +++ b/plugins/modules/ntnx_ndb_clusters_info.py @@ -11,7 +11,7 @@ --- module: ntnx_ndb_clusters_info short_description: info module for ndb clusters info -version_added: 1.8.0-beta.1 +version_added: 1.8.0 description: 'Get clusters info' options: name: @@ -22,12 +22,22 @@ description: - cluster id type: str + filters: + description: + - params to be considered for filtering response + type: dict + suboptions: + count_entities: + description: + - to show dependent entities of clusters + 
+        type: bool
 extends_documentation_fragment:
-      - nutanix.ncp.ntnx_ndb_base_module
+      - nutanix.ncp.ntnx_ndb_info_base_module
 author:
  - Prem Karat (@premkarat)
  - Pradeepsingh Bhati (@bhati-pradeep)
  - Alaa Bishtawi (@alaa-bish)
+ - Gevorg Khachatryan (@Gevorg-Khachatryan-97)
 """

 EXAMPLES = r"""
@@ -169,9 +179,17 @@ def get_module_spec():

+    filters_spec = dict(
+        count_entities=dict(type="bool"),
+    )
+
     module_args = dict(
         name=dict(type="str"),
         uuid=dict(type="str"),
+        filters=dict(
+            type="dict",
+            options=filters_spec,
+        ),
     )
     return module_args

@@ -194,8 +212,9 @@ def get_cluster(module, result):

 def get_clusters(module, result):
     cluster = Cluster(module)
+    query_params = module.params.get("filters")

-    resp = cluster.read()
+    resp = cluster.read(query=query_params)

     result["response"] = resp

@@ -204,7 +223,11 @@ def run_module():
     module = NdbBaseInfoModule(
         argument_spec=get_module_spec(),
         supports_check_mode=False,
-        mutually_exclusive=[("name", "uuid")],
+        mutually_exclusive=[
+            ("name", "uuid"),
+            ("name", "filters"),
+            ("uuid", "filters"),
+        ],
     )
     result = {"changed": False, "error": None, "response": None}
     if module.params.get("name") or module.params.get("uuid"):
diff --git a/plugins/modules/ntnx_ndb_database_clone_refresh.py b/plugins/modules/ntnx_ndb_database_clone_refresh.py
new file mode 100644
index 000000000..687145e37
--- /dev/null
+++ b/plugins/modules/ntnx_ndb_database_clone_refresh.py
@@ -0,0 +1,362 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Prem Karat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+DOCUMENTATION = r"""
+---
+module: ntnx_ndb_database_clone_refresh
+short_description: module for database clone refresh.
+version_added: 1.8.0
+description: module for refreshing a database clone to a certain point in time or snapshot.
+options: + uuid: + description: + - uuid of database clone + type: str + snapshot_uuid: + description: + - snapshot uuid for clone refresh + type: str + timezone: + description: + - timezone related to pitr_timestamp given + type: str + default: "Asia/Calcutta" + pitr_timestamp: + description: + - timestamp for point in time database cone refresh + - format is 'yyyy-mm-dd hh:mm:ss' + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_operations +author: + - Prem Karat (@premkarat) + - Pradeepsingh Bhati (@bhati-pradeep) + - Alaa Bishtawi (@alaa-bish) +""" +EXAMPLES = r""" +- name: create spec for refresh clone to a pitr timestamp + check_mode: yes + ntnx_ndb_database_clone_refresh: + uuid: "{{clone_uuid}}" + pitr_timestamp: "2023-02-04 07:29:36" + timezone: "UTC" + register: result + +- name: refresh db clone + ntnx_ndb_database_clone_refresh: + uuid: "{{clone_uuid}}" + snapshot_uuid: "{{snapshot_uuid}}" + register: result + +""" +RETURN = r""" +response: + description: An intentful representation of a clone status + returned: always + type: dict + sample: { + "id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "ansible-clone-updated-updated-updated-updated3s", + "description": "ansible-clone-desc-updated-updated", + "dateCreated": "2023-02-28 06:52:31", + "dateModified": "2023-02-28 07:20:10", + "properties": [ + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "CLONE_PD_OBJ_LIST", + "value": "9f491f43-e343-45d7-b552-5f38a647e018", + "secure": false, + "description": null + }, + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "primaryHost", + "value": "e748bcb4-a2bb-4b6b-bb9e-1cbfe7ff0e30", + "secure": false, + "description": null + }, + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "BASE_SIZE", + "value": "{\"clusterStorage\": {\"0a3b964f-8616-40b9-a564-99cf35f4b8d8\": + {\"9b8f4814-4536-42ef-9760-73341dbdc85a\": {\"size\": 304740352, \"allocatedSize\": 0, \"usedSize\": 0, \"unit\": \"B\"}, + \"ffdb3000-22bc-4994-86f5-5bb668422e5e\": {\"size\": 303677440, \"allocatedSize\": 0, \"usedSize\": 0, \"unit\": \"B\"}, + \"55034431-4f5b-48e0-bc58-13676bf9ed9b\": {\"size\": 9267200, \"allocatedSize\": 0, \"usedSize\": 0, \"unit\": \"B\"}, + \"57e55810-0702-4f63-87b9-ff67921b6466\" + : {\"size\": 5439488, \"allocatedSize\": 0, \"usedSize\": 0, \"unit\": \"B\"}}}}", + "secure": false, + "description": null + }, + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "version", + "value": "10.4", + "secure": false, + "description": null + }, + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "vm_ip", + "value": "xx.xx.xx.xx", + "secure": false, + "description": null + }, + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "postgres_software_home", + "value": "%2Fusr%2Fpgsql-10.4", + "secure": false, + "description": null + }, + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "listener_port", + "value": "2345", + "secure": false, + "description": null + }, + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "db_parameter_profile_id", + "value": "6bc3ceef-1681-49fa-b65d-cd968a33775e", + "secure": false, + "description": null + } + ], + "tags": [], + "clustered": false, + "clone": true, + "eraCreated": true, + "type": "postgres_database", + "status": "READY", + "timeMachineId": "2ec7d4a9-c6e6-4f51-a4bd-1af7f8ee8ca8", + "parentTimeMachineId": "7a39664b-dfb7-4529-887c-6d91f7e18604", + "timeZone": "UTC", + "lastRefreshTimestamp": 
"2023-02-28 06:52:49", + "sourceSnapshotId": "d8e62324-be91-4297-b116-10d42d186aff", + "provisionOperationId": null, + "metric": null, + "category": "DB_GROUP_IMPLICIT", + "parentDatabaseId": null, + "parentSourceDatabaseId": null, + "lcmConfig": null, + "timeMachine": null, + "databaseNodes": [ + { + "id": "aa11923c-8cb6-442a-87c1-5897b3e41af1", + "name": "ansible-clone-updated-updated-updated-updated3s", + "description": "", + "dateCreated": "2023-02-28 07:08:57", + "dateModified": "2023-02-28 07:18:47", + "properties": [], + "tags": [], + "databaseId": "4b86551d-168f-405b-a888-89ac9082bdff", + "status": "READY", + "databaseStatus": "READY", + "primary": false, + "dbserverId": "e748bcb4-a2bb-4b6b-bb9e-1cbfe7ff0e30", + "softwareInstallationId": "2a3b5a9e-80c0-478d-b5da-d56dd8e6c628", + "protectionDomainId": "9f491f43-e343-45d7-b552-5f38a647e018", + "info": { + "secureInfo": null, + "info": null + }, + "metadata": null, + "dbserver": null, + "protectionDomain": null + } + ], + "linkedDatabases": [ + { + "id": "7827ece1-7c86-46f1-8596-1b77ea179e87", + "name": "postgres", + "databaseName": "postgres", + "description": null, + "status": "READY", + "databaseStatus": "READY", + "parentDatabaseId": "4b86551d-168f-405b-a888-89ac9082bdff", + "parentLinkedDatabaseId": "6e3733cf-2994-49d2-945c-c1873564be97", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "dateCreated": "2023-02-28 07:18:16", + "dateModified": "2023-02-28 07:18:16", + "timeZone": null, + "info": { + "secureInfo": null, + "info": { + "created_by": "system" + } + }, + "metadata": null, + "metric": null, + "tags": [], + "parentDatabaseType": null, + "parentDatabaseName": null, + "snapshotId": null + }, + { + "id": "5251f347-8562-4bf3-aeb6-2105fc49cace", + "name": "prad", + "databaseName": "prad", + "description": null, + "status": "READY", + "databaseStatus": "READY", + "parentDatabaseId": "4b86551d-168f-405b-a888-89ac9082bdff", + "parentLinkedDatabaseId": "779f1f6a-502d-4ffd-9030-d21447c5ca3d", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "dateCreated": "2023-02-28 07:18:16", + "dateModified": "2023-02-28 07:18:16", + "timeZone": null, + "info": { + "secureInfo": null, + "info": { + "created_by": "user" + } + }, + "metadata": null, + "metric": null, + "tags": [], + "parentDatabaseType": null, + "parentDatabaseName": null, + "snapshotId": null + }, + { + "id": "df365e63-5b15-4d04-902f-2e871d7f339b", + "name": "template1", + "databaseName": "template1", + "description": null, + "status": "READY", + "databaseStatus": "READY", + "parentDatabaseId": "4b86551d-168f-405b-a888-89ac9082bdff", + "parentLinkedDatabaseId": "d013a63f-c9ba-4533-989d-57e57d8a4d6f", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "dateCreated": "2023-02-28 07:18:16", + "dateModified": "2023-02-28 07:18:16", + "timeZone": null, + "info": { + "secureInfo": null, + "info": { + "created_by": "system" + } + }, + "metadata": null, + "metric": null, + "tags": [], + "parentDatabaseType": null, + "parentDatabaseName": null, + "snapshotId": null + }, + { + "id": "82d14427-382e-4e3b-99e1-5359bb5f7abc", + "name": "template0", + "databaseName": "template0", + "description": null, + "status": "READY", + "databaseStatus": "READY", + "parentDatabaseId": "4b86551d-168f-405b-a888-89ac9082bdff", + "parentLinkedDatabaseId": "c18419fd-df31-4e54-b35a-ee004c0faafb", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "dateCreated": "2023-02-28 07:18:16", + "dateModified": "2023-02-28 07:18:16", + "timeZone": null, + "info": { + "secureInfo": null, + "info": { + 
"created_by": "system" + } + }, + "metadata": null, + "metric": null, + "tags": [], + "parentDatabaseType": null, + "parentDatabaseName": null, + "snapshotId": null + } + ], + "databases": null, +} +uuid: + description: Database clone uuid + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" +""" +import time # noqa: E402 + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.database_clones import DatabaseClone # noqa: E402 +from ..module_utils.ndb.operations import Operation # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + + +def get_module_spec(): + + module_args = dict( + uuid=dict(type="str", required=False), + snapshot_uuid=dict(type="str", required=False), + timezone=dict(type="str", default="Asia/Calcutta", required=False), + pitr_timestamp=dict(type="str", required=False), + ) + return module_args + + +def refresh_clone(module, result): + db_clone = DatabaseClone(module) + + uuid = module.params.get("uuid") + if not uuid: + module.fail_json( + msg="uuid is required field for database clone refresh", **result + ) + + spec, err = db_clone.get_clone_refresh_spec() + if err: + result["error"] = err + module.fail_json(msg="Failed getting spec for database clone refresh", **result) + + if module.check_mode: + result["response"] = spec + return + + resp = db_clone.refresh(uuid=uuid, data=spec) + result["response"] = resp + result["uuid"] = uuid + + if module.params.get("wait"): + ops_uuid = resp["operationId"] + operations = Operation(module) + time.sleep(5) # to get operation ID functional + operations.wait_for_completion(ops_uuid) + resp = db_clone.read(uuid) + result["response"] = resp + + result["changed"] = True + + +def run_module(): + mutually_exclusive_list = [ + ("snapshot_uuid", "pitr_timestamp"), + ] + module = NdbBaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[("state", "present", ("snapshot_uuid", "pitr_timestamp"), True)], + mutually_exclusive=mutually_exclusive_list, + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None, "uuid": None} + refresh_clone(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_database_clones.py b/plugins/modules/ntnx_ndb_database_clones.py new file mode 100644 index 000000000..a6485ee76 --- /dev/null +++ b/plugins/modules/ntnx_ndb_database_clones.py @@ -0,0 +1,953 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +DOCUMENTATION = r""" +--- +module: ntnx_ndb_database_clones +short_description: module for create, update and delete of ndb database clones +version_added: 1.8.0 +description: module for create, update and delete of ndb database clones +options: + uuid: + description: + - uuid of database clone for update and delete + type: str + name: + description: + - name of database clone + - update is allowed + - mandatory for creation + type: str + desc: + description: + - description of database clone + - update is allowed + type: str + db_params_profile: + description: + - database parameter profile for creating database clone + - mandatory for creation + type: dict + suboptions: + name: + description: + - profile name + 
          - Mutually exclusive with C(uuid)
+        type: str
+      uuid:
+        description:
+          - profile UUID
+          - Mutually exclusive with C(name)
+        type: str
+  db_vm:
+    description:
+      - database server vm details for hosting clone
+      - mandatory for creation
+    type: dict
+    suboptions:
+      create_new_server:
+        description:
+          - configuration for creating new database server vm for hosting db clone
+          - Mutually exclusive with C(use_authorized_server)
+        type: dict
+        suboptions:
+          name:
+            description:
+              - name of db server vm
+            type: str
+            required: true
+          desc:
+            description:
+              - description of database server vm
+            type: str
+          pub_ssh_key:
+            description:
+              - use SSH public key to access the database server VM
+            type: str
+            required: true
+          password:
+            description:
+              - password for newly created db server vm
+            type: str
+            required: true
+          cluster:
+            description:
+              - cluster details to host the vm
+            type: dict
+            required: true
+            suboptions:
+              name:
+                description:
+                  - cluster name
+                  - Mutually exclusive with C(uuid)
+                type: str
+              uuid:
+                description:
+                  - cluster UUID
+                  - Mutually exclusive with C(name)
+                type: str
+          network_profile:
+            description:
+              - network profile details
+            type: dict
+            required: true
+            suboptions:
+              name:
+                description:
+                  - profile name
+                  - Mutually exclusive with C(uuid)
+                type: str
+              uuid:
+                description:
+                  - profile UUID
+                  - Mutually exclusive with C(name)
+                type: str
+          compute_profile:
+            description:
+              - compute profile details
+            type: dict
+            required: true
+            suboptions:
+              name:
+                description:
+                  - profile name
+                  - Mutually exclusive with C(uuid)
+                type: str
+              uuid:
+                description:
+                  - profile UUID
+                  - Mutually exclusive with C(name)
+                type: str
+
+      use_authorized_server:
+        description:
+          - configure authorized database server VM for hosting database clone
+        type: dict
+        suboptions:
+          name:
+            description:
+              - authorized database server vm name
+              - Mutually exclusive with C(uuid)
+            type: str
+          uuid:
+            description:
+              - authorized database server vm uuid
+              - Mutually exclusive with C(name)
+            type: str
+  time_machine:
+    description:
+      - source time machine details
+      - mandatory for creation
+    type: dict
+    suboptions:
+      name:
+        description:
+          - name of time machine
+          - mutually exclusive with C(uuid)
+        type: str
+      uuid:
+        description:
+          - UUID of time machine
+          - mutually exclusive with C(name)
+        type: str
+      snapshot_uuid:
+        description:
+          - source snapshot uuid
+          - mutually exclusive with C(pitr_timestamp)
+        type: str
+      timezone:
+        description:
+          - timezone related to C(pitr_timestamp)
+        type: str
+        default: "Asia/Calcutta"
+      pitr_timestamp:
+        description:
+          - timestamp for creating clone from a point in time
+        type: str
+  postgres:
+    description:
+      - postgres database related config
+      - mandatory for creation
+    type: dict
+    suboptions:
+      db_password:
+        description:
+          - set database password
+        type: str
+        required: true
+      pre_clone_cmd:
+        description:
+          - commands to run before database clone creation
+        type: str
+      post_clone_cmd:
+        description:
+          - commands to run after database clone creation
+        type: str
+  tags:
+    description:
+      - list of tag name and value pairs to be associated with clone
+      - during update, given input tags override the existing tags of clone
+    type: dict
+  removal_schedule:
+    description:
+      - clone removal schedule
+      - update is allowed
+    type: dict
+    suboptions:
+      state:
+        description:
+          - state of schedule if added
+          - create, update and delete are allowed
+        type: str
+        choices: ["present", "absent"]
+        default: "present"
+      days:
+        description:
+          - number of days after which clone will be removed
+          - mutually exclusive to C(timestamp)
+        type: int
+      timestamp:
+        description:
+          - exact timestamp to remove database clone
+          - format is 'yyyy-mm-dd hh:mm:ss'
+          - mutually exclusive to C(days)
+        type: str
+      timezone:
+        description:
+          - timezone related to C(timestamp)
+        type: str
+      delete_database:
+        description:
+          - whether to delete database as well from clone instance during removal
+        type: bool
+        default: false
+      remind_before_in_days:
+        description:
+          - reminder in days before removal
+        type: int
+  refresh_schedule:
+    description:
+      - clone refresh schedule
+      - update is allowed
+    type: dict
+    suboptions:
+      state:
+        description:
+          - state of schedule if added
+          - create, update and delete are allowed
+        type: str
+        choices: ["present", "absent"]
+        default: "present"
+      days:
+        description:
+          - number of days after which clone will be refreshed
+        type: int
+      timezone:
+        description:
+          - timezone related to C(time) given
+        type: str
+      time:
+        description:
+          - exact time on a particular day when clone will be refreshed
+        type: str
+  delete_from_vm:
+    description:
+      - during delete, flag for deleting the database from database server vm as well
+      - mutually exclusive with C(soft_remove)
+    type: bool
+  soft_remove:
+    description:
+      - soft remove during delete process
+      - mutually exclusive with C(delete_from_vm)
+    type: bool
+extends_documentation_fragment:
+  - nutanix.ncp.ntnx_ndb_base_module
+  - nutanix.ncp.ntnx_operations
+author:
+  - Prem Karat (@premkarat)
+  - Pradeepsingh Bhati (@bhati-pradeep)
+  - Alaa Bishtawi (@alaa-bish)
+"""
+
+EXAMPLES = r"""
+- name: create clone using snapshot
+  ntnx_ndb_database_clones:
+    name: "{{clone_db1}}"
+    desc: "ansible-created-clone"
+
+    db_params_profile:
+      name: "{{db_params_profile.name}}"
+
+    db_vm:
+      create_new_server:
+        name: "{{ vm1_name }}"
+        desc: "vm for db server"
+        password: "{{ vm_password }}"
+        cluster:
+          name: "{{cluster.cluster1.name}}"
+        network_profile:
+          name: "{{ network_profile.name }}"
+        compute_profile:
+          name: "{{ compute_profile.name }}"
+        pub_ssh_key: "{{ public_ssh_key }}"
+
+    postgres:
+      db_password: "{{vm_password}}"
+
+    time_machine:
+      name: "{{tm1}}"
+      snapshot_uuid: "{{snapshot_uuid}}"
+
+    removal_schedule:
+      days: 2
+      timezone: "Asia/Calcutta"
+      remind_before_in_days: 1
+      delete_database: True
+
+    refresh_schedule:
+      days: 2
+      time: "12:00:00"
+      timezone: "Asia/Calcutta"
+
+    tags:
+      ansible-clones: ansible-test-db-clones
+  register: result
+
+- name: create clone using point in time
+  ntnx_ndb_database_clones:
+    name: "{{clone_db1}}"
+    desc: "ansible-created-clone"
+
+    db_params_profile:
+      name: "{{db_params_profile.name}}"
+
+    db_vm:
+      create_new_server:
+        name: "{{ vm1_name }}"
+        desc: "vm for db server"
+        password: "{{ vm_password }}"
+        cluster:
+          name: "{{cluster.cluster1.name}}"
+        network_profile:
+          name: "{{ network_profile.name }}"
+        compute_profile:
+          name: "{{ compute_profile.name }}"
+        pub_ssh_key: "{{ public_ssh_key }}"
+
+    postgres:
+      db_password: "{{vm_password}}"
+
+    time_machine:
+      name: "{{tm1}}"
+      pitr_timestamp: "2023-02-28 12:00:00"
+      timezone: "Asia/Calcutta"
+
+    removal_schedule:
+      days: 2
+      timezone: "Asia/Calcutta"
+      remind_before_in_days: 1
+      delete_database: True
+
+    refresh_schedule:
+      days: 2
+      time: "12:00:00"
+      timezone: "Asia/Calcutta"
+
+    tags:
+      ansible-clones: ansible-test-db-clones
+  register: result
+
+"""
+
+RETURN = r"""
+response:
+  description: An intentful representation of a clone status
+  returned: always
+  type: dict
+  sample: {
+    "id":
"4b86551d-168f-405b-a888-89ac9082bdff", + "name": "ansible-clone-updated-updated-updated-updated3s", + "description": "ansible-clone-desc-updated-updated", + "dateCreated": "2023-02-28 06:52:31", + "dateModified": "2023-02-28 07:20:10", + "properties": [ + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "CLONE_PD_OBJ_LIST", + "value": "9f491f43-e343-45d7-b552-5f38a647e018", + "secure": false, + "description": null + }, + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "primaryHost", + "value": "e748bcb4-a2bb-4b6b-bb9e-1cbfe7ff0e30", + "secure": false, + "description": null + }, + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "BASE_SIZE", + "value": "{\"clusterStorage\": {\"0a3b964f-8616-40b9-a564-99cf35f4b8d8\": + {\"9b8f4814-4536-42ef-9760-73341dbdc85a\": {\"size\": 304740352, \"allocatedSize\": 0, \"usedSize\": 0, \"unit\": \"B\"}, + \"ffdb3000-22bc-4994-86f5-5bb668422e5e\": + {\"size\": 303677440, \"allocatedSize\": 0, \"usedSize\": 0, \"unit\": \"B\"}, + \"55034431-4f5b-48e0-bc58-13676bf9ed9b\": {\"size\": 9267200, \"allocatedSize\": 0, \"usedSize\": 0, \"unit\": \"B\"}, + \"57e55810-0702-4f63-87b9-ff67921b6466\": {\"size\": 5439488, \"allocatedSize\": 0, \"usedSize\": 0, \"unit\": \"B\"}}}}", + "secure": false, + "description": null + }, + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "version", + "value": "10.4", + "secure": false, + "description": null + }, + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "vm_ip", + "value": "xx.xx.xx.xx", + "secure": false, + "description": null + }, + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "postgres_software_home", + "value": "%2Fusr%2Fpgsql-10.4", + "secure": false, + "description": null + }, + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "listener_port", + "value": "2345", + "secure": false, + "description": null + }, + { + "ref_id": "4b86551d-168f-405b-a888-89ac9082bdff", + "name": "db_parameter_profile_id", + "value": "6bc3ceef-1681-49fa-b65d-cd968a33775e", + "secure": false, + "description": null + } + ], + "tags": [], + "clustered": false, + "clone": true, + "eraCreated": true, + "type": "postgres_database", + "status": "READY", + "timeMachineId": "2ec7d4a9-c6e6-4f51-a4bd-1af7f8ee8ca8", + "parentTimeMachineId": "7a39664b-dfb7-4529-887c-6d91f7e18604", + "timeZone": "UTC", + "lastRefreshTimestamp": "2023-02-28 06:52:49", + "sourceSnapshotId": "d8e62324-be91-4297-b116-10d42d186aff", + "provisionOperationId": null, + "metric": null, + "category": "DB_GROUP_IMPLICIT", + "parentDatabaseId": null, + "parentSourceDatabaseId": null, + "lcmConfig": null, + "timeMachine": null, + "databaseNodes": [ + { + "id": "aa11923c-8cb6-442a-87c1-5897b3e41af1", + "name": "ansible-clone-updated-updated-updated-updated3s", + "description": "", + "dateCreated": "2023-02-28 07:08:57", + "dateModified": "2023-02-28 07:18:47", + "properties": [], + "tags": [], + "databaseId": "4b86551d-168f-405b-a888-89ac9082bdff", + "status": "READY", + "databaseStatus": "READY", + "primary": false, + "dbserverId": "e748bcb4-a2bb-4b6b-bb9e-1cbfe7ff0e30", + "softwareInstallationId": "2a3b5a9e-80c0-478d-b5da-d56dd8e6c628", + "protectionDomainId": "9f491f43-e343-45d7-b552-5f38a647e018", + "info": { + "secureInfo": null, + "info": null + }, + "metadata": null, + "dbserver": null, + "protectionDomain": null + } + ], + "linkedDatabases": [ + { + "id": "7827ece1-7c86-46f1-8596-1b77ea179e87", + "name": "postgres", + "databaseName": "postgres", + "description": null, 
+ "status": "READY", + "databaseStatus": "READY", + "parentDatabaseId": "4b86551d-168f-405b-a888-89ac9082bdff", + "parentLinkedDatabaseId": "6e3733cf-2994-49d2-945c-c1873564be97", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "dateCreated": "2023-02-28 07:18:16", + "dateModified": "2023-02-28 07:18:16", + "timeZone": null, + "info": { + "secureInfo": null, + "info": { + "created_by": "system" + } + }, + "metadata": null, + "metric": null, + "tags": [], + "parentDatabaseType": null, + "parentDatabaseName": null, + "snapshotId": null + }, + { + "id": "5251f347-8562-4bf3-aeb6-2105fc49cace", + "name": "prad", + "databaseName": "prad", + "description": null, + "status": "READY", + "databaseStatus": "READY", + "parentDatabaseId": "4b86551d-168f-405b-a888-89ac9082bdff", + "parentLinkedDatabaseId": "779f1f6a-502d-4ffd-9030-d21447c5ca3d", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "dateCreated": "2023-02-28 07:18:16", + "dateModified": "2023-02-28 07:18:16", + "timeZone": null, + "info": { + "secureInfo": null, + "info": { + "created_by": "user" + } + }, + "metadata": null, + "metric": null, + "tags": [], + "parentDatabaseType": null, + "parentDatabaseName": null, + "snapshotId": null + }, + { + "id": "df365e63-5b15-4d04-902f-2e871d7f339b", + "name": "template1", + "databaseName": "template1", + "description": null, + "status": "READY", + "databaseStatus": "READY", + "parentDatabaseId": "4b86551d-168f-405b-a888-89ac9082bdff", + "parentLinkedDatabaseId": "d013a63f-c9ba-4533-989d-57e57d8a4d6f", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "dateCreated": "2023-02-28 07:18:16", + "dateModified": "2023-02-28 07:18:16", + "timeZone": null, + "info": { + "secureInfo": null, + "info": { + "created_by": "system" + } + }, + "metadata": null, + "metric": null, + "tags": [], + "parentDatabaseType": null, + "parentDatabaseName": null, + "snapshotId": null + }, + { + "id": "82d14427-382e-4e3b-99e1-5359bb5f7abc", + "name": "template0", + "databaseName": "template0", + "description": null, + "status": "READY", + "databaseStatus": "READY", + "parentDatabaseId": "4b86551d-168f-405b-a888-89ac9082bdff", + "parentLinkedDatabaseId": "c18419fd-df31-4e54-b35a-ee004c0faafb", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "dateCreated": "2023-02-28 07:18:16", + "dateModified": "2023-02-28 07:18:16", + "timeZone": null, + "info": { + "secureInfo": null, + "info": { + "created_by": "system" + } + }, + "metadata": null, + "metric": null, + "tags": [], + "parentDatabaseType": null, + "parentDatabaseName": null, + "snapshotId": null + } + ], + "databases": null, +} +uuid: + description: Database clone uuid + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" +""" + +import time # noqa: E402 + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.database_clones import DatabaseClone # noqa: E402 +from ..module_utils.ndb.db_server_vm import DBServerVM # noqa: E402 +from ..module_utils.ndb.operations import Operation # noqa: E402 +from ..module_utils.ndb.tags import Tag # noqa: E402 +from ..module_utils.ndb.time_machines import TimeMachine # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + + +def get_module_spec(): + + mutually_exclusive = [("name", "uuid")] + entity_by_spec = dict(name=dict(type="str"), uuid=dict(type="str")) + + new_server = dict( + name=dict(type="str", required=True), + desc=dict(type="str", required=False), + pub_ssh_key=dict(type="str", required=True, 
no_log=True), + password=dict(type="str", required=True, no_log=True), + cluster=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=True, + ), + network_profile=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=True, + ), + compute_profile=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=True, + ), + ) + + db_vm = dict( + create_new_server=dict(type="dict", options=new_server, required=False), + use_authorized_server=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + ) + + time_machine = dict( + name=dict(type="str", required=False), + uuid=dict(type="str", required=False), + snapshot_uuid=dict(type="str", required=False), + pitr_timestamp=dict(type="str", required=False), + timezone=dict(type="str", default="Asia/Calcutta", required=False), + ) + + postgres = dict( + db_password=dict(type="str", required=True, no_log=True), + pre_clone_cmd=dict(type="str", required=False), + post_clone_cmd=dict(type="str", required=False), + ) + + removal_schedule = dict( + state=dict( + type="str", choices=["present", "absent"], default="present", required=False + ), + days=dict(type="int", required=False), + timezone=dict(type="str", required=False), + delete_database=dict(type="bool", default=False, required=False), + timestamp=dict(type="str", required=False), + remind_before_in_days=dict(type="int", required=False), + ) + + refresh_schedule = dict( + state=dict( + type="str", choices=["present", "absent"], default="present", required=False + ), + days=dict(type="int", required=False), + timezone=dict(type="str", required=False), + time=dict(type="str", required=False), + ) + + module_args = dict( + uuid=dict(type="str", required=False), + name=dict(type="str", required=False), + desc=dict(type="str", required=False), + db_params_profile=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + db_vm=dict( + type="dict", + options=db_vm, + mutually_exclusive=[("create_new_server", "use_authorized_server")], + required=False, + ), + time_machine=dict( + type="dict", + options=time_machine, + mutually_exclusive=[("snapshot_uuid", "pitr_timestamp")], + required=False, + ), + postgres=dict(type="dict", options=postgres, required=False), + tags=dict(type="dict", required=False), + removal_schedule=dict( + type="dict", + options=removal_schedule, + mutually_exclusive=[ + ("days", "timestamp"), + ], + required=False, + ), + refresh_schedule=dict(type="dict", options=refresh_schedule, required=False), + delete_from_vm=dict(type="bool", required=False), + soft_remove=dict(type="bool", required=False), + ) + return module_args + + +def get_clone_spec(module, result, time_machine_uuid): + + # create database instance obj + db_clone = DatabaseClone(module=module) + + spec, err = db_clone.get_spec(create=True) + if err: + result["error"] = err + module.fail_json(msg="Failed getting database clone spec", **result) + + # populate database engine related spec + spec, err = db_clone.get_db_engine_spec(spec) + if err: + result["error"] = err + err_msg = "Failed getting database engine related spec for database clone" + module.fail_json(msg=err_msg, **result) + + # populate database instance related spec + db_server_vms = DBServerVM(module) + + provision_new_server = ( + True if module.params.get("db_vm", {}).get("create_new_server") else False + ) + 
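+    # The clone's db server vm comes from exactly one of two sources:
+    # a newly provisioned server vm (create_new_server) or a vm already
+    # authorized for the source time machine (use_authorized_server).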
use_authorized_server = not provision_new_server
+
+    kwargs = {
+        "time_machine_uuid": time_machine_uuid,
+        "db_clone": True,
+        "provision_new_server": provision_new_server,
+        "use_authorized_server": use_authorized_server,
+    }
+
+    spec, err = db_server_vms.get_spec(old_spec=spec, **kwargs)
+    if err:
+        result["error"] = err
+        module.fail_json(msg="Failed getting vm spec for database clone", **result)
+
+    # populate tags related spec
+    tags = Tag(module)
+    spec, err = tags.get_spec(old_spec=spec, associate_to_entity=True, type="CLONE")
+    if err:
+        result["error"] = err
+        module.fail_json(
+            msg="Failed getting spec for tags for database clone", **result
+        )
+
+    return spec
+
+
+def create_db_clone(module, result):
+    db_clone = DatabaseClone(module)
+    time_machine = TimeMachine(module)
+
+    time_machine_config = module.params.get("time_machine")
+    if not time_machine_config:
+        return module.fail_json(
+            msg="time_machine is a required field for create", **result
+        )
+    time_machine_uuid, err = time_machine.get_time_machine_uuid(time_machine_config)
+    if err:
+        result["error"] = err
+        module.fail_json(
+            msg="Failed getting time machine uuid for database clone", **result
+        )
+    spec = get_clone_spec(module, result, time_machine_uuid=time_machine_uuid)
+
+    if module.check_mode:
+        result["response"] = spec
+        return
+
+    resp = db_clone.create(time_machine_uuid=time_machine_uuid, data=spec)
+    result["response"] = resp
+    result["uuid"] = resp["entityId"]
+    uuid = resp["entityId"]
+
+    if module.params.get("wait"):
+        ops_uuid = resp["operationId"]
+        operations = Operation(module)
+        time.sleep(5)  # to get operation ID functional
+        operations.wait_for_completion(ops_uuid, delay=15)
+        resp = db_clone.read(uuid)
+        db_clone.format_response(resp)
+        result["response"] = resp
+
+    result["changed"] = True
+
+
+def check_for_idempotency(old_spec, update_spec):
+    if (
+        old_spec["name"] != update_spec["name"]
+        or old_spec["description"] != update_spec["description"]
+    ):
+        return False
+
+    if update_spec.get("removeRefreshConfig") or update_spec.get("removeExpiryConfig"):
+        return False
+
+    if old_spec["lcmConfig"].get("expiryDetails") != update_spec["lcmConfig"].get(
+        "expiryDetails"
+    ):
+        return False
+
+    if old_spec["lcmConfig"].get("refreshDetails") != update_spec["lcmConfig"].get(
+        "refreshDetails"
+    ):
+        return False
+
+    if len(old_spec["tags"]) != len(update_spec["tags"]):
+        return False
+
+    old_tag_values = {}
+    new_tag_values = {}
+    for i in range(len(old_spec["tags"])):
+        old_tag_values[old_spec["tags"][i]["tagName"]] = old_spec["tags"][i]["value"]
+        new_tag_values[update_spec["tags"][i]["tagName"]] = update_spec["tags"][i][
+            "value"
+        ]
+
+    if old_tag_values != new_tag_values:
+        return False
+
+    return True
+
+
+def update_db_clone(module, result):
+    _clones = DatabaseClone(module)
+
+    uuid = module.params.get("uuid")
+    if not uuid:
+        module.fail_json(msg="uuid is a required field for update", **result)
+    result["uuid"] = uuid
+
+    resp = _clones.read(uuid)
+    old_spec = _clones.get_default_update_spec(override_spec=resp)
+
+    spec, err = _clones.get_spec(old_spec=old_spec, update=True)
+    if err:
+        result["error"] = err
+        module.fail_json(msg="Failed generating update database clone spec", **result)
+
+    # populate tags related spec
+    if module.params.get("tags"):
+        tags = Tag(module)
+        spec, err = tags.get_spec(old_spec=spec, associate_to_entity=True, type="CLONE")
+        if err:
+            result["error"] = err
+            module.fail_json(
+                msg="Failed getting spec for tags for updating database clone", **result
+            )
+
+    if 
module.check_mode: + result["response"] = spec + return + + if check_for_idempotency(old_spec, spec): + result["skipped"] = True + module.exit_json(msg="Nothing to change.") + + resp = _clones.update(data=spec, uuid=uuid) + _clones.format_response(resp) + result["response"] = resp + result["uuid"] = uuid + result["changed"] = True + + +def delete_db_clone(module, result): + _clones = DatabaseClone(module) + + uuid = module.params.get("uuid") + if not uuid: + module.fail_json(msg="uuid is required field for delete", **result) + + default_spec = _clones.get_default_delete_spec() + spec, err = _clones.get_spec(old_spec=default_spec, delete=True) + if err: + result["error"] = err + module.fail_json( + msg="Failed getting spec for deleting database clone", **result + ) + + if module.check_mode: + result["response"] = spec + return + + resp = _clones.delete(uuid, data=spec) + + if module.params.get("wait"): + ops_uuid = resp["operationId"] + time.sleep(5) # to get operation ID functional + operations = Operation(module) + resp = operations.wait_for_completion(ops_uuid, delay=5) + + result["response"] = resp + result["changed"] = True + + +def run_module(): + mutually_exclusive_list = [ + ("uuid", "db_params_profile"), + ("uuid", "db_vm"), + ("uuid", "postgres"), + ("uuid", "time_machine"), + ("delete_from_vm", "soft_remove"), + ] + module = NdbBaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + mutually_exclusive=mutually_exclusive_list, + required_if=[ + ("state", "present", ("name", "uuid"), True), + ("state", "absent", ("uuid",)), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None, "uuid": None} + if module.params["state"] == "present": + if module.params.get("uuid"): + update_db_clone(module, result) + else: + create_db_clone(module, result) + else: + delete_db_clone(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_database_log_catchup.py b/plugins/modules/ntnx_ndb_database_log_catchup.py new file mode 100644 index 000000000..122c1414b --- /dev/null +++ b/plugins/modules/ntnx_ndb_database_log_catchup.py @@ -0,0 +1,162 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_ndb_database_log_catchup +short_description: module for performing log catchups action +version_added: 1.8.0 +description: module for performing log catchups action +options: + time_machine_uuid: + description: + - time machine UUID + type: str + required: true + for_restore: + description: + - enable this flag if log catchup is to be done for restore process + type: bool + default: false +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_operations +author: + - Prem Karat (@premkarat) + - Pradeepsingh Bhati (@bhati-pradeep) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: perform log catchup + ntnx_ndb_database_log_catchup: + time_machine_uuid: "{{time_machine_uuid}}" + register: result + +- name: perform log catchup for restore + ntnx_ndb_database_log_catchup: + time_machine_uuid: "{{time_machine_uuid}}" + for_restore: true + register: result +""" +RETURN = r""" +response: + description: An intentful representation of a 
task status post log catchup + returned: always + type: dict + sample: { + "entityName": "OWZWuxlTgBhX-time-machine", + "work": null, + "stepGenEnabled": false, + "setStartTime": false, + "timeZone": "UTC", + "id": "92e426d8-680c-4c93-8042-63c97aafa818", + "name": "Performing Log Catchup before Restore Instance OWZWuxlTgBhX on host xx.xx.xx.xx", + "uniqueName": null, + "type": "perform_log_catchup", + "startTime": "2023-02-27 19:12:21", + "timeout": 70, + "timeoutInfo": { + "timeoutTimestamp": "2023-02-27 20:22:21", + "timeRemaining": 0, + "timeout": 70, + "timezone": "UTC" + }, + "endTime": "2023-02-27 19:14:07", + "instanceId": null, + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "status": "5", + "percentageComplete": "100", + "steps": [{}, {}], + "properties": [], + "parentId": null, + "parentStep": 0, + "message": null, + "metadata": {}, + "entityId": "5da0150a-c476-4fce-9ce2-cc8f28e652e5", + "entityType": "ERA_TIME_MACHINE", + "systemTriggered": false, + "userVisible": true, + "dbserverId": "4a19a165-d682-4ca3-b740-826ac206c18b", + "dateSubmitted": "2023-02-27 19:12:17", + "deferredBy": null, + "deferredByOpIds": null, + "scheduleTime": null, + "isInternal": false, + "nxClusterId": "0a3b964f-8616-40b9-a564-99cf35f4b8d8", + "dbserverStatus": "DELETED", + "childOperations": [], + "ancestorOpIds": null, + "userRequestedAction": "0", + "userRequestedActionTime": null +} + +""" + + +import time # noqa: E402 + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.operations import Operation # noqa: E402 +from ..module_utils.ndb.time_machines import TimeMachine # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + + +def get_module_spec(): + module_args = dict( + time_machine_uuid=dict(type="str", required=True), + for_restore=dict(type="bool", required=False, default=False), + ) + return module_args + + +def log_catchup(module, result): + time_machine_uuid = module.params.get("time_machine_uuid") + if not time_machine_uuid: + return module.fail_json(msg="time_machine_uuid is required for log catchups") + + time_machine = TimeMachine(module) + for_restore = module.params.get("for_restore") + spec = time_machine.get_log_catchup_spec(for_restore) + if module.check_mode: + result["response"] = spec + return + + resp = time_machine.log_catchup(time_machine_uuid=time_machine_uuid, data=spec) + result["response"] = resp + + if module.params.get("wait"): + ops_uuid = resp["operationId"] + time.sleep(5) # to get operation ID functional + operations = Operation(module) + resp = operations.wait_for_completion(ops_uuid) + result["response"] = resp + + result["changed"] = True + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None} + + log_catchup(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_database_restore.py b/plugins/modules/ntnx_ndb_database_restore.py new file mode 100644 index 000000000..1660d2328 --- /dev/null +++ b/plugins/modules/ntnx_ndb_database_restore.py @@ -0,0 +1,186 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = 
type
+
+DOCUMENTATION = r"""
+---
+module: ntnx_ndb_database_restore
+short_description: module for restoring database instance
+version_added: 1.8.0
+description:
+    - module for restoring a database instance to a certain point in time or snapshot
+    - the latest snapshot is used if neither pitr timestamp nor snapshot uuid is given
+options:
+    pitr_timestamp:
+        description:
+            - timestamp of point in time restore
+            - "format: 'yyyy-mm-dd hh:mm:ss'"
+            - mutually exclusive with C(snapshot_uuid)
+        type: str
+    snapshot_uuid:
+        description:
+            - snapshot uuid for restore
+            - mutually exclusive with C(pitr_timestamp)
+        type: str
+    timezone:
+        description:
+            - timezone related to given C(pitr_timestamp)
+        type: str
+    db_uuid:
+        description:
+            - database instance uuid
+        type: str
+        required: true
+extends_documentation_fragment:
+    - nutanix.ncp.ntnx_ndb_base_module
+    - nutanix.ncp.ntnx_operations
+author:
+ - Prem Karat (@premkarat)
+ - Pradeepsingh Bhati (@bhati-pradeep)
+ - Alaa Bishtawi (@alaa-bish)
+"""
+
+EXAMPLES = r"""
+- name: perform restore using latest snapshot
+  ntnx_ndb_database_restore:
+    db_uuid: "{{db_uuid}}"
+  register: result
+
+- name: perform restore using snapshot uuid
+  ntnx_ndb_database_restore:
+    db_uuid: "{{db_uuid}}"
+    snapshot_uuid: "{{snapshot_uuid}}"
+  register: result
+
+- name: perform restore using pitr
+  ntnx_ndb_database_restore:
+    db_uuid: "{{db_uuid}}"
+    pitr_timestamp: "{{pitr_timestamp}}"
+    timezone: "UTC"
+  register: result
+"""
+RETURN = r"""
+response:
+  description: An intentful representation of a task status post restore
+  returned: always
+  type: dict
+  sample: {
+    "entityName": "OWZWuxlTgBhX",
+    "work": null,
+    "stepGenEnabled": false,
+    "setStartTime": false,
+    "timeZone": "UTC",
+    "id": "4cdf6937-6f99-4662-9f46-46c1ad7e83b2",
+    "name": "Restore Postgres Instance to Snapshot a1d5afdb-5890-4b41-a0e1-e6e79cad70cf",
+    "uniqueName": null,
+    "type": "restore_database",
+    "startTime": "2023-02-27 19:25:48",
+    "timeout": 250,
+    "timeoutInfo": {
+        "timeoutTimestamp": "2023-02-27 23:35:48",
+        "timeRemaining": 0,
+        "timeout": 250,
+        "timezone": "UTC"
+    },
+    "endTime": "2023-02-27 19:33:50",
+    "instanceId": null,
+    "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73",
+    "status": "5",
+    "percentageComplete": "100",
+    "steps": [],
+    "properties": [],
+    "parentId": null,
+    "parentStep": 0,
+    "message": null,
+    "metadata": {},
+    "entityId": "117760dc-c766-46f1-9ffd-126826cf37a9",
+    "entityType": "ERA_DATABASE",
+    "systemTriggered": false,
+    "userVisible": true,
+    "dbserverId": "4a19a165-d682-4ca3-b740-826ac206c18b",
+    "dateSubmitted": "2023-02-27 19:23:33",
+    "deferredBy": null,
+    "deferredByOpIds": null,
+    "scheduleTime": null,
+    "isInternal": false,
+    "nxClusterId": "0a3b964f-8616-40b9-a564-99cf35f4b8d8",
+    "dbserverStatus": "DELETED",
+    "childOperations": [],
+    "ancestorOpIds": null,
+    "userRequestedAction": "0",
+    "userRequestedActionTime": null
+}
+
+"""
+import time  # noqa: E402
+
+from ..module_utils.ndb.base_module import NdbBaseModule  # noqa: E402
+from ..module_utils.ndb.database_instances import DatabaseInstance  # noqa: E402
+from ..module_utils.ndb.operations import Operation  # noqa: E402
+from ..module_utils.utils import remove_param_with_none_value  # noqa: E402
+
+
+def get_module_spec():
+
+    module_args = dict(
+        snapshot_uuid=dict(type="str", required=False),
+        pitr_timestamp=dict(type="str", required=False),
+        db_uuid=dict(type="str", required=True),
+        timezone=dict(type="str", required=False),
+    )
+    return module_args
+
+
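+# Editorial sketch, for orientation only: per the documentation above, the
+# three restore inputs are mutually exclusive and map to:
+#     snapshot_uuid given          -> restore to that snapshot
+#     pitr_timestamp (+ timezone)  -> point-in-time restore
+#     neither given                -> restore to the latest snapshot
+# The actual request payload is built by DatabaseInstance.get_restore_spec().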
+def restore_database(module, result): + db = DatabaseInstance(module) + db_uuid = module.params.get("db_uuid") + if not db_uuid: + module.fail_json(msg="db_uuid is required field for restoring", **result) + + spec = db.get_restore_spec(module.params) + + if module.check_mode: + result["response"] = spec + return + + resp = db.restore(uuid=db_uuid, data=spec) + result["response"] = resp + + if module.params.get("wait"): + ops_uuid = resp["operationId"] + time.sleep(5) # to get operation ID functional + operations = Operation(module) + resp = operations.wait_for_completion(ops_uuid) + result["response"] = resp + + result["changed"] = True + result["db_uuid"] = db_uuid + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_together=[("pitr_timestamp", "timezone")], + mutually_exclusive=[("snapshot_uuid", "pitr_timestamp")], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None, "db_uuid": None} + + restore_database(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_database_scale.py b/plugins/modules/ntnx_ndb_database_scale.py new file mode 100644 index 000000000..98dc2aa0b --- /dev/null +++ b/plugins/modules/ntnx_ndb_database_scale.py @@ -0,0 +1,179 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_ndb_database_scale +short_description: module for scaling database instance +version_added: 1.8.0 +description: + - module for scaling database instance + - currently, extension of database storage is only supported +options: + pre_update_cmd: + description: + - complete OS command that you want to run before scaling + type: str + post_update_cmd: + description: + - complete OS command that you want to run post scaling + type: str + storage_gb: + description: + - storage to be added in GB + type: int + required: true + db_uuid: + description: + - database instance uuid + type: str + required: true +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_operations +author: + - Prem Karat (@premkarat) + - Pradeepsingh Bhati (@bhati-pradeep) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: extend database storage for scaling database + ntnx_ndb_database_scale: + db_uuid: "{{db_uuid}}" + storage_gb: 2 + pre_update_cmd: "ls" + post_update_cmd: "ls -a" + register: result +""" +RETURN = r""" +response: + description: An intentful representation of a task status post scaling + returned: always + type: dict + sample: { + "entityName": "OWZWuxlTgBhX", + "work": null, + "stepGenEnabled": false, + "setStartTime": false, + "timeZone": "UTC", + "id": "8778ef1b-9278-4f0e-a80a-7be5d8998e86", + "name": "Extend Database Storage", + "uniqueName": null, + "type": "extend_database_storage", + "startTime": "2023-02-27 19:36:39", + "timeout": 70, + "timeoutInfo": { + "timeoutTimestamp": "2023-02-27 20:46:39", + "timeRemaining": 0, + "timeout": 70, + "timezone": "UTC" + }, + "endTime": "2023-02-27 19:42:42", + "instanceId": null, + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "status": "5", + "percentageComplete": "100", + "steps": [], + "properties": [], + "parentId": null, + 
"parentStep": 0, + "message": null, + "metadata": {}, + "entityId": "117760dc-c766-46f1-9ffd-126826cf37a9", + "entityType": "ERA_DATABASE", + "systemTriggered": false, + "userVisible": true, + "dbserverId": "4a19a165-d682-4ca3-b740-826ac206c18b", + "dateSubmitted": "2023-02-27 19:34:25", + "deferredBy": null, + "deferredByOpIds": null, + "scheduleTime": null, + "isInternal": false, + "nxClusterId": "0a3b964f-8616-40b9-a564-99cf35f4b8d8", + "dbserverStatus": "DELETED", + "childOperations": [], + "ancestorOpIds": null, + "userRequestedAction": "0", + "userRequestedActionTime": null +} + +""" +import time # noqa: E402 + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.database_instances import DatabaseInstance # noqa: E402 +from ..module_utils.ndb.operations import Operation # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + + +def get_module_spec(): + + module_args = dict( + db_uuid=dict(type="str", required=True), + storage_gb=dict(type="int", required=True), + pre_update_cmd=dict(type="str", required=False), + post_update_cmd=dict(type="str", required=False), + ) + return module_args + + +def scale_db_instance(module, result): + _databases = DatabaseInstance(module) + uuid = module.params.get("db_uuid") + if not uuid: + module.fail_json(msg="db_uuid is required field for scaling", **result) + + resp = _databases.read(uuid) + result["response"] = resp + + database_type = resp.get("type") + if not database_type: + module.fail_json(msg="failed fetching database type", **result) + + spec = _databases.get_scaling_spec( + scale_config=module.params, database_type=database_type + ) + + if module.check_mode: + result["response"] = spec + return + + resp = _databases.scale(uuid=uuid, data=spec) + result["response"] = resp + + if module.params.get("wait"): + ops_uuid = resp["operationId"] + time.sleep(5) # to get operation ID functional + operations = Operation(module) + resp = operations.wait_for_completion(ops_uuid) + result["response"] = resp + + result["changed"] = True + result["db_uuid"] = uuid + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None, "db_uuid": None} + + scale_db_instance(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_database_snapshots.py b/plugins/modules/ntnx_ndb_database_snapshots.py new file mode 100644 index 000000000..b4720f672 --- /dev/null +++ b/plugins/modules/ntnx_ndb_database_snapshots.py @@ -0,0 +1,412 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_ndb_database_snapshots +short_description: module for creating, updating and deleting database snapshots +version_added: 1.8.0 +description: + - module for creating, updating and deleting database snapshots + - currently, this module only polls for snapshot create in primary source cluster + - it doesn't poll for replication task if multiple clusters are mentioned + - check_mode is not supported for snapshot update +options: + snapshot_uuid: + description: + - snapshot uuid for delete or update + type: 
str + name: + description: + - name of snaphsot. + - required for create + - update is allowed + type: str + clusters: + description: + - list of clusters incase snapshots needs to be replicated to secondary clusters + - if secondary clusters of time machines are mentioned, then this module won't track the replication process + - clusters changes are not considered during update, for replication use ntnx_ndb_replicate_database_snapshots + type: list + elements: dict + suboptions: + name: + description: + - name of cluster + - mutually_exclusive with C(uuid) + type: str + uuid: + description: + - uuid of cluster + - mutually_exclusive with c(name) + type: str + expiry_days: + description: + - expiry in days + type: int + remove_expiry: + description: + - use this flag for removing expiry schedule of snapshot + type: bool + time_machine_uuid: + description: + - time machine uuid + - required for creation + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_operations +author: + - Prem Karat (@premkarat) + - Pradeepsingh Bhati (@bhati-pradeep) + - Alaa Bishtawi (@alaa-bish) +""" + + +EXAMPLES = r""" +- name: create snapshot with expiry + ntnx_ndb_database_snapshots: + name: "{{snapshot_name}}2" + time_machine_uuid: "{{time_machine_uuid}}" + expiry_days: 4 + register: result + +- name: create snapshot on secondary cluster + ntnx_ndb_database_snapshots: + name: "ansible-created-snapshot-on-{{cluster.cluster2.name}}" + time_machine_uuid: "{{time_machine.uuid}}" + clusters: + - name: "{{cluster.cluster2.name}}" + register: result +""" + +RETURN = r""" +response: + description: An intentful representation of a snapshot + returned: always + type: dict + sample: { + "id": "10a7fb55-bda4-4f09-9797-70af1f90e137", + "name": "test_snapshot", + "description": null, + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "dateCreated": "2023-02-28 07:24:25", + "dateModified": "2023-02-28 09:27:51", + "accessLevel": null, + "properties": [], + "tags": [], + "snapshotId": "119454", + "snapshotUuid": "119454", + "nxClusterId": "0a3b964f-8616-40b9-a564-99cf35f4b8d8", + "protectionDomainId": "6f555d69-df00-4cad-a714-2a96042fec5a", + "parentSnapshotId": null, + "timeMachineId": "7a39664b-dfb7-4529-887c-6d91f7e18604", + "databaseNodeId": "07073f2c-8d90-437b-9bf4-ab02a11ff01d", + "appInfoVersion": "4", + "status": "ACTIVE", + "type": "MANUAL", + "applicableTypes": [ + "MANUAL" + ], + "snapshotTimeStamp": "2023-02-28 07:23:58", + "info": { + "secureInfo": null, + "info": null, + "linkedDatabases": [ + { + "id": "d013a63f-c9ba-4533-989d-57e57d8a4d6f", + "databaseName": "template1", + "status": "READY", + "info": { + "info": { + "created_by": "system" + } + }, + "appConsistent": false, + "message": null, + "clone": false + }, + { + "id": "c18419fd-df31-4e54-b35a-ee004c0faafb", + "databaseName": "template0", + "status": "READY", + "info": { + "info": { + "created_by": "system" + } + }, + "appConsistent": false, + "message": null, + "clone": false + }, + { + "id": "779f1f6a-502d-4ffd-9030-d21447c5ca3d", + "databaseName": "prad", + "status": "READY", + "info": { + "info": { + "created_by": "user" + } + }, + "appConsistent": false, + "message": null, + "clone": false + }, + { + "id": "6e3733cf-2994-49d2-945c-c1873564be97", + "databaseName": "postgres", + "status": "READY", + "info": { + "info": { + "created_by": "system" + } + }, + "appConsistent": false, + "message": null, + "clone": false + } + ], + "databases": null, + "databaseGroupId": null, + "missingDatabases": 
null, + "replicationHistory": null + }, + "metadata": { + "secureInfo": null, + "info": null, + "deregisterInfo": null, + "fromTimeStamp": "2023-02-28 07:23:58", + "toTimeStamp": "2023-02-28 07:23:58", + "replicationRetryCount": 0, + "lastReplicationRetryTimestamp": null, + "lastReplicationRetrySourceSnapshotId": null, + "async": false, + "standby": false, + "curationRetryCount": 0, + "operationsUsingSnapshot": [] + }, + "metric": { + "lastUpdatedTimeInUTC": null, + "storage": { + "lastUpdatedTimeInUTC": null, + "controllerNumIops": null, + "controllerAvgIoLatencyUsecs": null, + "size": 3.5749888E7, + "allocatedSize": 0.0, + "usedSize": 0.0, + "unit": "B" + } + }, + "softwareSnapshotId": "e08b73dd-9503-4053-8a01-4bfe59f3feb4", + "softwareDatabaseSnapshot": false, + "dbServerStorageMetadataVersion": 2, + "sanitised": false, + "sanitisedFromSnapshotId": null, + "timeZone": "UTC", + "processed": false, + "databaseSnapshot": false, + "fromTimeStamp": "2023-02-28 07:23:58", + "toTimeStamp": "2023-02-28 07:23:58", + "dbserverId": null, + "dbserverName": null, + "dbserverIp": null, + "replicatedSnapshots": null, + "softwareSnapshot": null, + "sanitisedSnapshots": null, + "snapshotFamily": null, + "snapshotTimeStampDate": 1677569038000, + "lcmConfig": null, + "parentSnapshot": true, + "snapshotSize": 3.5749888E7 +} +snapshot_uuid: + description: snapshot uuid + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" +""" +import time # noqa: E402 + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.operations import Operation # noqa: E402 +from ..module_utils.ndb.snapshots import Snapshot # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + + +def get_module_spec(): + mutually_exclusive = [("name", "uuid")] + entity_by_spec = dict(name=dict(type="str"), uuid=dict(type="str")) + + module_args = dict( + snapshot_uuid=dict(type="str", required=False), + name=dict(type="str", required=False), + time_machine_uuid=dict(type="str", required=False), + clusters=dict( + type="list", + elements="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + expiry_days=dict(type="int", required=False), + remove_expiry=dict(type="bool", required=False), + ) + return module_args + + +# Notes: +# 1. Currently we only poll for source snapshot create. Replication task is not polled. 
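+# 2. To replicate an existing snapshot to other clusters and track that
+#    operation, use the ntnx_ndb_replicate_database_snapshots module (see the
+#    clusters option documentation above).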
+ +# Create snapshot +def create_snapshot(module, result): + time_machine_uuid = module.params.get("time_machine_uuid") + if not time_machine_uuid: + return module.fail_json( + msg="time_machine_uuid is required for creating snapshot" + ) + + snapshots = Snapshot(module) + spec, err = snapshots.get_spec() + if err: + result["error"] = err + module.fail_json(msg="Failed generating snapshot create spec", **result) + + if module.check_mode: + result["response"] = spec + return + + resp = snapshots.create_snapshot(time_machine_uuid, spec) + result["response"] = resp + + if module.params.get("wait"): + ops_uuid = resp["operationId"] + operations = Operation(module) + time.sleep(3) + operations.wait_for_completion(ops_uuid, delay=5) + + # get snapshot info after its finished + resp, err = snapshots.get_snapshot( + time_machine_uuid=time_machine_uuid, name=module.params.get("name") + ) + if err: + result["error"] = err + module.fail_json( + msg="Failed fetching snapshot info post creation", **result + ) + result["response"] = resp + result["snapshot_uuid"] = resp.get("id") + + result["changed"] = True + + +def verify_snapshot_expiry_idempotency(old_spec, new_spec): + if old_spec.get("expireInDays") != new_spec.get("expireInDays"): + return False + return True + + +def update_snapshot(module, result): + uuid = module.params.get("snapshot_uuid") + if not uuid: + module.fail_json(msg="snapshot_uuid is required field for update", **result) + + # get current details of snapshot + _snapshot = Snapshot(module) + snapshot = _snapshot.read(uuid=uuid) + + # compare and update accordingly + updated = False + + # check if rename is required + if module.params.get("name") and module.params.get("name") != snapshot.get("name"): + spec = _snapshot.get_rename_snapshot_spec(name=module.params["name"]) + snapshot = _snapshot.rename_snapshot(uuid=uuid, data=spec) + updated = True + + # check if update/removal of expiry schedule is required + if module.params.get("remove_expiry"): + spec = _snapshot.get_remove_expiry_spec(uuid=uuid, name=snapshot.get("name")) + snapshot = _snapshot.remove_expiry(uuid=uuid, data=spec) + updated = True + + elif module.params.get("expiry_days"): + spec = _snapshot.get_expiry_update_spec(config=module.params) + lcm_config = snapshot.get("lcmConfig", {}) or {} + expiry_details = lcm_config.get("expiryDetails", {}) + if not verify_snapshot_expiry_idempotency( + expiry_details, spec.get("lcmConfig", {}).get("expiryDetails", {}) + ): + snapshot = _snapshot.update_expiry(uuid, spec) + updated = True + + if not updated: + result["skipped"] = True + module.exit_json(msg="Nothing to change.") + + snapshot = _snapshot.read( + uuid=uuid, query={"load-replicated-child-snapshots": True} + ) + result["snapshot_uuid"] = uuid + result["response"] = snapshot + result["changed"] = True + + +# Delete snapshot +def delete_snapshot(module, result): + snapshot_uuid = module.params.get("snapshot_uuid") + if not snapshot_uuid: + module.fail_json(msg="snapshot_uuid is required field for delete", **result) + + snapshots = Snapshot(module) + resp = snapshots.delete(uuid=snapshot_uuid) + + if module.params.get("wait"): + ops_uuid = resp["operationId"] + operations = Operation(module) + time.sleep(3) # to get ops ID functional + resp = operations.wait_for_completion(ops_uuid, delay=2) + + result["response"] = resp + result["changed"] = True + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + mutually_exclusive=[ + ("snapshot_uuid", "time_machine_uuid"), 
+ ("remove_expiry", "expiry_days"), + ], + required_if=[ + ("state", "present", ("name", "snapshot_uuid"), True), + ("state", "present", ("snapshot_uuid", "time_machine_uuid"), True), + ("state", "absent", ("snapshot_uuid",)), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None, "snapshot_uuid": None} + + if module.params["state"] == "present": + if module.params.get("snapshot_uuid"): + update_snapshot(module, result) + else: + create_snapshot(module, result) + else: + delete_snapshot(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_databases.py b/plugins/modules/ntnx_ndb_databases.py index e9aeee9b0..6a7d76cdd 100644 --- a/plugins/modules/ntnx_ndb_databases.py +++ b/plugins/modules/ntnx_ndb_databases.py @@ -11,8 +11,11 @@ --- module: ntnx_ndb_databases short_description: Module for create, update and delete of single instance database. Currently, postgres type database is officially supported. -version_added: 1.8.0-beta.1 -description: Module for create, update and delete of single instance database in Nutanix Database Service +version_added: 1.8.0 +description: + - Module for create, update and delete of single instance database in Nutanix Database Service + - During delete, by default it will only unregister database instance. Add allowed params to change it. + - Currently, single and HA postgres instance is supported by this module options: db_uuid: description: @@ -58,6 +61,12 @@ type: str description: name of vm required: true + desc: + type: str + description: description of vm + ip: + type: str + description: assign IP address pub_ssh_key: type: str description: public ssh key for access to vm @@ -151,6 +160,7 @@ description: - uuid of registered vm - mutually_exclusive with C(name) + time_machine: description: - time machine details @@ -181,7 +191,6 @@ schedule: type: dict description: schedule for taking snapshot - required: True suboptions: daily: type: str @@ -223,21 +232,50 @@ type: bool default: true description: enable/disable auto tuning of log drive + clusters: + type: list + elements: dict + description: + - clusters for data access management in time machine + - to be used for HA instance only + suboptions: + name: + type: str + description: + - name of cluster + - mutually_exclusive with C(uuid) + uuid: + type: str + description: + - uuid of cluster + - mutually_exclusive with C(name) postgres: type: dict description: action arguments for postgres type database suboptions: + archive_wal_expire_days: + type: str + description: + - archived write ahead logs expiry days + - only allowed for HA instance + default: "-1" listener_port: type: str - description: listener port for db - required: true + description: + - listener port for db + - required for both HA and single instance + default: "5432" db_name: type: str - description: initial database name + description: + - name of initial database added + - required for both HA and single instance required: true db_password: type: str - description: postgres database password + description: + - set postgres database password + - required for both HA and single instance required: true auto_tune_staging_drive: type: bool @@ -254,10 +292,52 @@ cluster_database: type: bool default: false - description: if clustered database + description: + - this field is deprecated + - not required + patroni_cluster_name: + type: str + description: + - patroni cluster name + 
- required for HA instance + ha_proxy: + type: dict + description: HA proxy details, set it for HA instance + suboptions: + provision_virtual_ip: + type: bool + description: set for provision of virtual IP + default: true + write_port: + type: str + description: port number for read/write request + default: "5000" + read_port: + type: str + description: port number for read request + default: "5001" + enable_synchronous_mode: + type: bool + default: false + description: + - set to enable synchronous replication + - allowed for HA instance + enable_peer_auth: + type: bool + default: false + description: + - set to enable peer authentication + - allowed for HA instance + type: + description: + - if its a HA or singe instance + - mandatory for creation + type: str + choices: ["single", "ha"] + default: "single" db_size: type: int - description: database instance size + description: database instance size, required for single and ha instance required: true pre_create_script: type: str @@ -265,26 +345,251 @@ required: false post_create_script: type: str - description: commands to run after database instance creation + description: commands to run post database instance creation required: false + db_server_cluster: + description: + - configure db server cluster + - required when creating HA instance + - for postgres, max two HA proxy nodes are allowed + - for postgres, minimum three database nodes are required + type: dict + suboptions: + new_cluster: + description: + - configure new database server cluster + type: dict + required: true + suboptions: + name: + description: + - name of database server cluster + type: str + required: true + desc: + description: + - description of database server cluster + type: str + vms: + description: + - list configuration of new vms/nodes to be part of database server cluster + type: list + elements: dict + required: true + suboptions: + name: + description: + - name of vm + type: str + required: true + cluster: + description: + - cluster where they will be hosted + - this will overide default cluster provided for all vms + type: dict + suboptions: + name: + type: str + description: + - name of cluster + - mutually_exclusive with C(uuid) + uuid: + type: str + description: + - uuid of cluster + - mutually_exclusive with C(name) + network_profile: + description: + - network profile details + - this will overide default network profile provided for all vms + type: dict + suboptions: + name: + type: str + description: + - name of profile + - mutually_exclusive with C(uuid) + uuid: + type: str + description: + - uuid of profile + - mutually_exclusive with C(name) + compute_profile: + description: + - compute profile details for the node + - this will overide default compute profile provided for all vms + type: dict + suboptions: + name: + type: str + description: + - name of profile + - mutually_exclusive with C(uuid) + uuid: + type: str + description: + - uuid of profile + - mutually_exclusive with C(name) + role: + description: + - role of node/vm + type: str + choices: ["Primary", "Secondary"] + node_type: + description: + - type of node + type: str + choices: ["database", "haproxy"] + default: "database" + archive_log_destination: + description: + - archive log destination + type: str + ip: + description: + - assign IP address to the vm + type: str + password: + description: + - set password of above vms + type: str + required: true + pub_ssh_key: + description: + - public ssh key of user for vm access + type: str + software_profile: + description: + - software 
profile details + type: dict + required: true + suboptions: + name: + type: str + description: + - name of profile + - mutually_exclusive with C(uuid) + uuid: + type: str + description: + - uuid of profile + - mutually_exclusive with C(name) + version_id: + type: str + description: + - version id of software profile + - by default latest version will be used + network_profile: + description: + - network profile details + type: dict + suboptions: + name: + type: str + description: + - name of profile + - mutually_exclusive with C(uuid) + uuid: + type: str + description: + - uuid of profile + - mutually_exclusive with C(name) + compute_profile: + description: + - compute profile details for all the vms + type: dict + suboptions: + name: + type: str + description: + - name of profile + - mutually_exclusive with C(uuid) + uuid: + type: str + description: + - uuid of profile + - mutually_exclusive with C(name) + cluster: + description: + - cluster on which all vms will be hosted + type: dict + required: true + suboptions: + name: + type: str + description: + - name of cluster + - mutually_exclusive with C(uuid) + uuid: + type: str + description: + - uuid of cluster + - mutually_exclusive with C(name) + ips: + description: + - set IP address i.e. virtual IP for db server cluster + type: list + elements: dict + suboptions: + cluster: + description: + - ndb cluster details + type: dict + required: true + suboptions: + name: + type: str + description: + - name of cluster + - mutually_exclusive with C(uuid) + uuid: + type: str + description: + - uuid of cluster + - mutually_exclusive with C(name) + ip: + description: + - ip address + type: str + required: true tags: type: dict description: - dict of tag name as key and tag value as value - update allowed + - during update, given input will override existing tags auto_tune_staging_drive: type: bool + default: true description: - enable/disable auto tuning of stage drive - enabled by default soft_delete: type: bool description: - - only unregister from era in delete process + - to be used with C(state) = absent + - unregister from ndb without any process - if not provided, database instance from db server VM will be deleted + delete_db_from_vm: + type: bool + description: + - to be used with C(state) = absent + - delete database data from vm delete_time_machine: type: bool - description: delete time machine as well in delete process + description: + - to be used with C(state) = absent + - delete time machine as well in delete process + delete_db_server_vms: + type: bool + description: + - to be used with C(state) = absent + - this will delete DB server vms or DB server cluster of database instance + unregister_db_server_vms: + type: bool + description: + - to be used with C(state) = absent + - this will unregister DB server vms or DB server cluster of database instance timeout: description: - timeout for polling database operations in seconds @@ -292,55 +597,190 @@ type: int required: false default: 2100 + automated_patching: + description: + - configure automated patching using maintenance windows + type: dict + suboptions: + maintenance_window: + description: + - maintenance window details + type: dict + suboptions: + name: + description: + - name of maintenance window + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of maintenance window + - mutually exclusive with C(name) + type: str + tasks: + description: + - list of maintenance pre-post tasks + type: list + elements: dict + suboptions: + type: + description: + - 
type of patching + type: str + choices: ["OS_PATCHING", "DB_PATCHING"] + pre_task_cmd: + description: + - full os command which needs to run before patching task in db server vm + type: str + post_task_cmd: + description: + - full os command which needs to run after patching task in db server vm + type: str extends_documentation_fragment: - nutanix.ncp.ntnx_ndb_base_module - nutanix.ncp.ntnx_operations author: - Prem Karat (@premkarat) - Pradeepsingh Bhati (@bhati-pradeep) + - Alaa Bishtawi (@alaa-bish) """ EXAMPLES = r""" -- name: Create postgres database instance using with new vm +- name: create single instance postgres database on new db server vm ntnx_ndb_databases: - name: "test" + wait: true + name: "{{db1_name}}" + desc: "ansible-created-db-desc" db_params_profile: - name: "TEST_PROFILE" + name: "{{db_params_profile.name}}" db_vm: create_new_server: - name: "test-vm" - password: "test-vm-password" + ip: "{{ vm_ip }}" + name: "{{ vm1_name }}" + desc: "vm for db server" + password: "{{ vm_password }}" cluster: - name: "EraCluster" + name: "{{cluster.cluster1.name}}" software_profile: - name: "TEST_SOFTWARE_PROFILE" + name: "{{ software_profile.name }}" network_profile: - name: "TEST_NETWORK_PROFILE" + name: "{{ static_network_profile.name }}" compute_profile: - name: "TEST_COMPUTE_PROFILE" - pub_ssh_key: "" + name: "{{ compute_profile.name }}" + pub_ssh_key: "{{ public_ssh_key }}" postgres: listener_port: "5432" - db_name: ansible_test - db_password: "postgres-test-password" + db_name: testAnsible + db_password: "{{ vm_password }}" + db_size: 200 + type: "single" + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + tags: + ansible-databases: "single-instance-dbs" + + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + register: result + +- name: create HA instance postgres database with multicluster vms + ntnx_ndb_databases: + timeout: 5400 + wait: true + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_params_profile: + name: "{{postgres_ha_profiles.db_params_profile.name}}" + + db_server_cluster: + new_cluster: + name: "{{cluster1_name}}" + cluster: + name: "{{cluster.cluster1.name}}" + software_profile: + name: "{{ postgres_ha_profiles.software_profile.name }}" + network_profile: + name: "{{ postgres_ha_profiles.multicluster_network_profile.name }}" + compute_profile: + name: "{{ postgres_ha_profiles.compute_profile.name }}" + password: "{{vm_password}}" + pub_ssh_key: "{{public_ssh_key}}" + vms: + + - name: "{{cluster1_name}}-vm-1" + node_type: "database" + role: "Primary" + + - name: "{{cluster1_name}}-vm-2" + node_type: "database" + role: "Secondary" + + - name: "{{cluster1_name}}-vm-3" + cluster: + name: "{{cluster.cluster2.name}}" + node_type: "database" + role: "Secondary" + + postgres: + type: "ha" + db_name: testAnsible + db_password: "{{ vm_password }}" db_size: 200 + patroni_cluster_name: "patroni_cluster" time_machine: - name: POSTGRES_SERVER_PRAD_TM_1 + name: TM1 + desc: TM-desc sla: - name: "TEST_SLA" + name: "{{ sla.name }}" schedule: daily: "11:10:02" weekly: WEDNESDAY monthly: 4 quaterly: JANUARY - yearly: FEBRUARY log_catchup: 30 snapshots_per_day: 2 - register: db + clusters: + - name: "{{cluster.cluster1.name}}" + - 
uuid: "{{cluster.cluster2.uuid}}" + tags: + ansible-databases: "ha-instance-dbs" + + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + + register: result """ RETURN = r""" @@ -349,16 +789,11 @@ returned: always type: dict sample: { - "accessLevel": null, "category": "DB_GROUP_IMPLICIT", "clone": false, "clustered": false, - "databaseClusterType": null, - "databaseGroupStateInfo": null, - "databaseName": "POSTGRES_DATABASE_ANSIBLE", "databaseNodes": [ { - "accessLevel": null, "databaseId": "e9374379-de51-4cc8-8d12-b1b6eb64d129", "databaseStatus": "READY", "dateCreated": "2022-10-19 18:49:25", @@ -383,8 +818,6 @@ "tags": [] } ], - "databaseStatus": "UNKNOWN", - "databases": null, "dateCreated": "2022-10-19 18:26:55", "dateModified": "2022-10-19 18:51:26", "dbserverLogicalClusterId": null, @@ -429,8 +862,6 @@ }, "secureInfo": {} }, - "internal": false, - "lcmConfig": null, "linkedDatabases": [ { "databaseName": "prad", @@ -525,30 +956,7 @@ "timeZone": null } ], - "metadata": { - "baseSizeComputed": false, - "capabilityResetTime": null, - "createdDbservers": null, - "deregisterInfo": null, - "deregisteredWithDeleteTimeMachine": false, - "info": null, - "lastLogCatchUpForRestoreOperationId": null, - "lastRefreshTimestamp": null, - "lastRequestedRefreshTimestamp": null, - "logCatchUpForRestoreDispatched": false, - "originalDatabaseName": null, - "pitrBased": false, - "provisionOperationId": "d9b1924f-a768-4cd8-886b-7a69e61f5b89", - "refreshBlockerInfo": null, - "registeredDbservers": null, - "sanitised": false, - "secureInfo": null, - "sourceSnapshotId": null, - "stateBeforeRefresh": null, - "stateBeforeRestore": null, - "stateBeforeScaling": null, - "tmActivateOperationId": "40d6b3a3-4f57-4c17-9ba2-9279d2f247c2" - }, + "provisionOperationId": "d9b1924f-a768-4cd8-886b-7a69e61f5b89", "metric": null, "name": "POSTGRES_DATABASE_ANSIBLE", "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", @@ -631,8 +1039,16 @@ from copy import deepcopy # noqa: E402 from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 -from ..module_utils.ndb.databases import Database # noqa: E402 +from ..module_utils.ndb.database_instances import DatabaseInstance # noqa: E402 +from ..module_utils.ndb.db_server_cluster import DBServerCluster # noqa: E402 +from ..module_utils.ndb.db_server_vm import DBServerVM # noqa: E402 +from ..module_utils.ndb.maintenance_window import ( # noqa: E402 + AutomatedPatchingSpec, + MaintenanceWindow, +) from ..module_utils.ndb.operations import Operation # noqa: E402 +from ..module_utils.ndb.tags import Tag # noqa: E402 +from ..module_utils.ndb.time_machines import TimeMachine # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 @@ -644,11 +1060,22 @@ def get_module_spec(): ) mutually_exclusive = [("name", "uuid")] entity_by_spec = dict(name=dict(type="str"), uuid=dict(type="str")) - software_profile = dict(version_id=dict(type="str")) - software_profile.update(deepcopy(entity_by_spec)) + automated_patching = deepcopy( + AutomatedPatchingSpec.automated_patching_argument_spec + ) + software_profile = dict( + name=dict(type="str"), uuid=dict(type="str"), version_id=dict(type="str") + ) + + ha_proxy = dict( + provision_virtual_ip=dict(type="bool", default=True, required=False), + write_port=dict(type="str", default="5000", required=False), + read_port=dict(type="str", 
default="5001", required=False), + ) new_server = dict( name=dict(type="str", required=True), + desc=dict(type="str", required=False), pub_ssh_key=dict(type="str", required=True, no_log=True), password=dict(type="str", required=True, no_log=True), cluster=dict( @@ -675,6 +1102,7 @@ def get_module_spec(): mutually_exclusive=mutually_exclusive, required=True, ), + ip=dict(type="str", required=False), ) db_vm = dict( @@ -687,6 +1115,83 @@ def get_module_spec(): ), ) + cluster_vm = dict( + name=dict(type="str", required=True), + cluster=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + network_profile=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + compute_profile=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + role=dict(type="str", choices=["Primary", "Secondary"], required=False), + node_type=dict( + type="str", + choices=["database", "haproxy"], + default="database", + required=False, + ), + archive_log_destination=dict(type="str", required=False), + ip=dict(type="str", required=False), + ) + cluster_ip_info = dict( + cluster=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=True, + ), + ip=dict(type="str", required=True), + ) + new_cluster = dict( + name=dict(type="str", required=True), + desc=dict(type="str", required=False), + vms=dict(type="list", elements="dict", options=cluster_vm, required=True), + password=dict(type="str", required=True, no_log=True), + pub_ssh_key=dict(type="str", required=False, no_log=True), + software_profile=dict( + type="dict", + options=software_profile, + mutually_exclusive=mutually_exclusive, + required=True, + ), + network_profile=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + compute_profile=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + cluster=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=True, + ), + ips=dict(type="list", elements="dict", options=cluster_ip_info, required=False), + ) + + # TO-DO: use_registered_clusters for oracle, ms sql, etc. 
+ db_server_cluster = dict( + new_cluster=dict(type="dict", options=new_cluster, required=True), + ) + sla = dict( uuid=dict(type="str", required=False), name=dict(type="str", required=False), @@ -711,18 +1216,33 @@ def get_module_spec(): mutually_exclusive=mutually_exclusive, required=True, ), - schedule=dict(type="dict", options=schedule, required=True), + schedule=dict(type="dict", options=schedule, required=False), auto_tune_log_drive=dict(type="bool", required=False, default=True), + clusters=dict( + type="list", + elements="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), ) postgres = dict( - listener_port=dict(type="str", required=True), + type=dict( + type="str", choices=["single", "ha"], default="single", required=False + ), + listener_port=dict(type="str", default="5432", required=False), db_name=dict(type="str", required=True), db_password=dict(type="str", required=True, no_log=True), auto_tune_staging_drive=dict(type="bool", default=True, required=False), allocate_pg_hugepage=dict(type="bool", default=False, required=False), auth_method=dict(type="str", default="md5", required=False), cluster_database=dict(type="bool", default=False, required=False), + patroni_cluster_name=dict(type="str", required=False), + ha_proxy=dict(type="dict", options=ha_proxy, required=False), + enable_synchronous_mode=dict(type="bool", default=False, required=False), + archive_wal_expire_days=dict(type="str", default="-1", required=False), + enable_peer_auth=dict(type="bool", default=False, required=False), ) postgres.update(deepcopy(default_db_arguments)) @@ -742,38 +1262,127 @@ def get_module_spec(): mutually_exclusive=[("create_new_server", "use_registered_server")], required=False, ), + db_server_cluster=dict( + type="dict", + options=db_server_cluster, + required=False, + ), time_machine=dict(type="dict", options=time_machine, required=False), postgres=dict(type="dict", options=postgres, required=False), tags=dict(type="dict", required=False), - auto_tune_staging_drive=dict(type="bool", required=False), + auto_tune_staging_drive=dict(type="bool", default=True, required=False), + automated_patching=dict( + type="dict", options=automated_patching, required=False + ), soft_delete=dict(type="bool", required=False), + delete_db_from_vm=dict(type="bool", required=False), delete_time_machine=dict(type="bool", required=False), + unregister_db_server_vms=dict(type="bool", required=False), + delete_db_server_vms=dict(type="bool", required=False), ) return module_args -def create_instance(module, result): - _databases = Database(module) +def get_provision_spec(module, result, ha=False): - name = module.params["name"] - uuid, err = _databases.get_uuid(name) - if uuid: - module.fail_json( - msg="Database instance with given name already exists", **result + # create database instance obj + db_instance = DatabaseInstance(module=module) + + # get default spec + spec = db_instance.get_default_provision_spec() + + if ha: + # populate DB server VM cluster related spec + db_server_cluster = DBServerCluster(module=module) + spec, err = db_server_cluster.get_spec( + old_spec=spec, db_instance_provision=True ) + if err: + result["error"] = err + err_msg = "Failed getting db server vm cluster spec for database instance" + module.fail_json(msg=err_msg, **result) + else: + # populate VM related spec + db_vm = DBServerVM(module=module) + + provision_new_server = ( + True if module.params.get("db_vm", {}).get("create_new_server") else False + ) + use_registered_server = not 
provision_new_server + + kwargs = { + "provision_new_server": provision_new_server, + "use_registered_server": use_registered_server, + "db_instance_provision": True, + } + spec, err = db_vm.get_spec(old_spec=spec, **kwargs) + if err: + result["error"] = err + err_msg = "Failed getting vm spec for database instance" + module.fail_json(msg=err_msg, **result) + + # populate database engine related spec + spec, err = db_instance.get_db_engine_spec(spec, provision=True) + if err: + result["error"] = err + err_msg = "Failed getting database engine related spec for database instance" + module.fail_json(msg=err_msg, **result) + + # populate database instance related spec + spec, err = db_instance.get_spec(old_spec=spec, provision=True) + if err: + result["error"] = err + module.fail_json(msg="Failed getting spec for database instance", **result) - spec, err = _databases.get_spec() + # populate time machine related spec + time_machine = TimeMachine(module) + spec, err = time_machine.get_spec(old_spec=spec) if err: result["error"] = err + err_msg = "Failed getting spec for time machine for database instance" + module.fail_json(msg=err_msg, **result) + + # populate tags related spec + tags = Tag(module) + spec, err = tags.get_spec(old_spec=spec, associate_to_entity=True, type="DATABASE") + if err: + result["error"] = err + module.fail_json( + msg="Failed getting spec for tags for database instance", **result + ) + + # configure automated patching only during create + if module.params.get("automated_patching") and not module.params.get("uuid"): + + mw = MaintenanceWindow(module) + mw_spec, err = mw.get_spec(configure_automated_patching=True) + if err: + result["error"] = err + err_msg = "Failed getting spec for automated patching for new database instance creation" + module.fail_json(msg=err_msg, **result) + spec["maintenanceTasks"] = mw_spec + return spec + + +def create_instance(module, result): + db_instance = DatabaseInstance(module) + name = module.params["name"] + uuid, err = db_instance.get_uuid(name) + if uuid: module.fail_json( - msg="Failed generating create database instance spec", **result + msg="Database instance with given name already exists", **result ) + ha = False + if module.params.get("db_server_cluster"): + ha = True + + spec = get_provision_spec(module, result, ha=ha) if module.check_mode: result["response"] = spec return - resp = _databases.create(data=spec) + resp = db_instance.provision(data=spec) result["response"] = resp result["db_uuid"] = resp["entityId"] db_uuid = resp["entityId"] @@ -783,7 +1392,9 @@ def create_instance(module, result): operations = Operation(module) time.sleep(5) # to get operation ID functional operations.wait_for_completion(ops_uuid) - resp = _databases.read(db_uuid) + query = {"detailed": True, "load-dbserver-cluster": True} + resp = db_instance.read(db_uuid, query=query) + db_instance.format_response(resp) result["response"] = resp result["changed"] = True @@ -814,7 +1425,7 @@ def check_for_idempotency(old_spec, update_spec): def update_instance(module, result): - _databases = Database(module) + _databases = DatabaseInstance(module) uuid = module.params.get("db_uuid") if not uuid: @@ -823,49 +1434,94 @@ def update_instance(module, result): resp = _databases.read(uuid) old_spec = _databases.get_default_update_spec(override_spec=resp) - update_spec, err = _databases.get_spec(old_spec=old_spec) - - # due to field name changes - if update_spec.get("databaseDescription"): - update_spec["description"] = update_spec.pop("databaseDescription") - + spec, 
err = _databases.get_spec(old_spec=old_spec, update=True)
     if err:
         result["error"] = err
         module.fail_json(
             msg="Failed generating update database instance spec", **result
         )
 
+    # populate tags related spec
+    if module.params.get("tags"):
+        tags = Tag(module)
+        spec, err = tags.get_spec(
+            old_spec=spec, associate_to_entity=True, type="DATABASE"
+        )
+        if err:
+            result["error"] = err
+            err_msg = "Failed getting spec for tags for updating database instance"
+            module.fail_json(msg=err_msg, **result)
+
     if module.check_mode:
-        result["response"] = update_spec
+        result["response"] = spec
         return
 
-    if check_for_idempotency(old_spec, update_spec):
+    if check_for_idempotency(old_spec, spec):
         result["skipped"] = True
         module.exit_json(msg="Nothing to change.")
 
-    resp = _databases.update(data=update_spec, uuid=uuid)
+    _databases.update(data=spec, uuid=uuid)
+
+    query = {"detailed": True, "load-dbserver-cluster": True}
+    resp = _databases.read(uuid, query=query)
+    _databases.format_response(resp)
+
     result["response"] = resp
     result["db_uuid"] = uuid
     result["changed"] = True
 
 
+def delete_db_servers(module, result, database_info):
+    """
+    This method deletes or unregisters the database server vms or the db server
+    cluster associated with the given database instance
+    """
+    if module.params.get("unregister_db_server_vms") or module.params.get(
+        "delete_db_server_vms"
+    ):
+        db_servers = None
+        uuid = None
+        if database_info.get("clustered", False):
+            db_servers = DBServerCluster(module)
+            uuid = database_info.get("dbserverlogicalCluster", {}).get(
+                "dbserverClusterId"
+            )
+        else:
+            db_servers = DBServerVM(module)
+            database_nodes = database_info.get("databaseNodes")
+            if database_nodes:
+                uuid = database_nodes[0].get("dbserverId")
+
+        if not uuid:
+            module.fail_json(
+                msg="Failed fetching uuid of associated db server vm or db server cluster",
+            )
+
+        spec = db_servers.get_default_delete_spec(
+            delete=module.params.get("delete_db_server_vms", False)
+        )
+        resp = db_servers.delete(uuid=uuid, data=spec)
+
+        ops_uuid = resp["operationId"]
+        time.sleep(5)  # to get operation ID functional
+        operations = Operation(module)
+        resp = operations.wait_for_completion(ops_uuid, delay=5)
+
+        if not result.get("response"):
+            result["response"] = {}
+        result["response"]["db_server_vms_delete_status"] = resp
+
+
 def delete_instance(module, result):
-    _databases = Database(module)
+    _databases = DatabaseInstance(module)
 
     uuid = module.params.get("db_uuid")
     if not uuid:
         module.fail_json(msg="uuid is required field for delete", **result)
 
-    spec = _databases.get_default_delete_spec()
-    if module.params.get("soft_delete"):
-        spec["remove"] = True
-        spec["delete"] = False
-    else:
-        spec["delete"] = True
-        spec["remove"] = False
+    query = {"detailed": True, "load-dbserver-cluster": True}
+    database = _databases.read(uuid, query=query)
 
-    if module.params.get("delete_time_machine"):
-        spec["deleteTimeMachine"] = True
+    spec = _databases.get_delete_spec()
 
     if module.check_mode:
         result["response"] = spec
@@ -877,9 +1533,12 @@
     ops_uuid = resp["operationId"]
     time.sleep(5)  # to get operation ID functional
     operations = Operation(module)
-    resp = operations.wait_for_completion(ops_uuid)
+    resp = operations.wait_for_completion(ops_uuid, delay=15)
+    result["response"] = resp
+
+    # delete db server vms or cluster only when database cleanup has finished
+    delete_db_servers(module, result, database_info=database)
 
-    result["response"] = resp
     result["changed"] = True
 
 
diff --git a/plugins/modules/ntnx_ndb_databases_info.py b/plugins/modules/ntnx_ndb_databases_info.py
index 22ce95253..a9e5430b7 100644
--- a/plugins/modules/ntnx_ndb_databases_info.py
+++ b/plugins/modules/ntnx_ndb_databases_info.py
@@ -11,8 +11,10 @@
 ---
 module: ntnx_ndb_databases_info
 short_description: info module for ndb database instances
-version_added: 1.8.0-beta.1
-description: 'Get database instance info'
+version_added: 1.8.0
+description:
+  - Get database instance info
+  - If name or uuid is not given then it will fetch all database instances
 options:
   name:
     description:
@@ -20,10 +22,45 @@
     type: str
   uuid:
     description:
-      - database id
+      - database instance uuid
     type: str
+  filters:
+    description:
+      - filters for database instance info
+    type: dict
+    suboptions:
+      detailed:
+        description:
+          - get detailed response
+        type: bool
+      load_dbserver_cluster:
+        description:
+          - load db server cluster in response
+        type: bool
+      order_by_dbserver_cluster:
+        description:
+          - order response by db server cluster
+        type: bool
+      order_by_dbserver_logical_cluster:
+        description:
+          - order response by db server logical cluster
+        type: bool
+      value:
+        description:
+          - value for given C(value_type)
+        type: str
+      value_type:
+        description:
+          - value type for given C(value)
+          - filter response based on given value type and value
+        type: str
+        choices: ["ip", "name", "database-name"]
+      time_zone:
+        description:
+          - timezone related to C(pitr_timestamp)
+        type: str
 extends_documentation_fragment:
-  - nutanix.ncp.ntnx_ndb_base_module
+  - nutanix.ncp.ntnx_ndb_info_base_module
 author:
   - Prem Karat (@premkarat)
   - Gevorg Khachatryan (@Gevorg-Khachatryan-97)
@@ -58,6 +95,13 @@
     uuid: ""
   register: result
 
+- name: Get era database using its uuid and detailed response
+  ntnx_ndb_databases_info:
+    filters:
+      detailed: True
+    uuid: ""
+  register: result
+  no_log: true
 """
 RETURN = r"""
 response:
@@ -660,27 +704,51 @@
 """
 
 from ..module_utils.ndb.base_info_module import NdbBaseInfoModule  # noqa: E402
-from ..module_utils.ndb.databases import Database  # noqa: E402
+from ..module_utils.ndb.database_instances import DatabaseInstance  # noqa: E402
+from ..module_utils.utils import format_filters_map  # noqa: E402
 
 
 def get_module_spec():
 
+    filters_spec = dict(
+        detailed=dict(type="bool"),
+        load_dbserver_cluster=dict(type="bool"),
+        order_by_dbserver_cluster=dict(type="bool"),
+        order_by_dbserver_logical_cluster=dict(type="bool"),
+        value=dict(type="str"),
+        value_type=dict(
+            type="str",
+            choices=[
+                "ip",
+                "name",
+                "database-name",
+            ],
+        ),
+        time_zone=dict(type="str"),
+    )
+
     module_args = dict(
         name=dict(type="str"),
        uuid=dict(type="str"),
+        filters=dict(
+            type="dict",
+            options=filters_spec,
+        ),
     )
 
     return module_args
 
 
 def get_database(module, result):
-    database = Database(module)
+    database = DatabaseInstance(module)
+
+    query_params = module.params.get("filters")
+    query_params = format_filters_map(query_params)
+
     if module.params.get("name"):
         name = module.params["name"]
-        resp, err = database.get_database(name=name)
+        resp, err = database.get_database(name=name, query=query_params)
     else:
         uuid = module.params["uuid"]
-        resp, err = database.get_database(uuid=uuid)
+        resp, err = database.get_database(uuid=uuid, query=query_params)
 
     if err:
         result["error"] = err
@@ -689,9 +757,11 @@
 
 
 def get_databases(module, result):
-    database = Database(module)
+    database = DatabaseInstance(module)
+    query_params = module.params.get("filters")
+    query_params = format_filters_map(query_params)
 
-    resp = database.read()
+    resp = database.read(query=query_params)
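+    # note: format_filters_map is assumed to translate the snake_case filter
+    # keys above (e.g. load_dbserver_cluster) into the query parameters the
+    # NDB API expects; with no filters given, query is None and all database
+    # instances are fetched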
result["response"] = resp diff --git a/plugins/modules/ntnx_ndb_db_server_vms.py b/plugins/modules/ntnx_ndb_db_server_vms.py new file mode 100644 index 000000000..d8a57c320 --- /dev/null +++ b/plugins/modules/ntnx_ndb_db_server_vms.py @@ -0,0 +1,741 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +DOCUMENTATION = r""" +--- +module: ntnx_ndb_db_server_vms +short_description: module for create, delete and update of database server vms +version_added: 1.8.0 +description: + - module for create, delete and update of database server vms +options: + state: + description: + - when C(state)=present and uuid given, it will update db server vm + - when C(state)=present and uuid not given, it will create new db server vm + - when C(state)=absent, by default it will perform unregistration process unless supported arguments are given + name: + description: + - name of database server vm + - update allowed + type: str + uuid: + description: + - uuid of database server vm for updating or deleting vm + type: str + desc: + description: + - description of vm + - allowed for update + type: str + reset_name_in_ntnx_cluster: + description: + - set this to reset name of vm to name in ndb in cluster as well + type: bool + default: false + reset_desc_in_ntnx_cluster: + description: + - set this to reset description of vm to name in ndb in cluster as well + type: bool + default: false + cluster: + description: + - ndb cluster where the vm will be hosted + - required for create + type: dict + suboptions: + name: + description: + - name of cluster + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of cluster + - mutually exclusive with C(name) + type: str + network_profile: + description: + - network profile details + - required for create + type: dict + suboptions: + name: + description: + - name of profile + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of profile + - mutually exclusive with C(name) + type: str + compute_profile: + description: + - required for create + - compute profile details + type: dict + suboptions: + name: + description: + - name of profile + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of profile + - mutually exclusive with C(name) + type: str + software_profile: + description: + - use software profile as source for creating vm + - required for create + - either name or uuid is mandatory + type: dict + suboptions: + name: + description: + - name of profile + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of profile + - mutually exclusive with C(name) + type: str + version_uuid: + description: + - version UUID for softwware profile + - if not given then latest version will be used + type: str + time_machine: + description: + - use time machine as source for creating vm + - either name or uuid is mandatory + type: dict + suboptions: + name: + description: + - name of time machine + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of time machine + - mutually exclusive with C(name) + type: str + snapshot_uuid: + description: + - source snapshot uuid + - required for create + type: str + password: + description: + - password of vm + type: str + pub_ssh_key: + description: + - give access using user's public ssh key + type: str 
+  time_zone:
+    description:
+      - timezone of vm
+    type: str
+    default: "Asia/Calcutta"
+  database_type:
+    description:
+      - database engine type
+    type: str
+    choices: ["postgres_database"]
+  automated_patching:
+    description:
+      - configure automated patching using maintenance windows
+      - to be used only during creation
+    type: dict
+    suboptions:
+      maintenance_window:
+        description:
+          - maintenance window details
+        type: dict
+        suboptions:
+          name:
+            description:
+              - name of maintenance window
+              - mutually exclusive with C(uuid)
+            type: str
+          uuid:
+            description:
+              - uuid of maintenance window
+              - mutually exclusive with C(name)
+            type: str
+      tasks:
+        description:
+          - list of maintenance pre-post tasks
+        type: list
+        elements: dict
+        suboptions:
+          type:
+            description:
+              - type of patching
+            type: str
+            choices: ["OS_PATCHING", "DB_PATCHING"]
+          pre_task_cmd:
+            description:
+              - full os command which needs to run before patching task in db server vm
+            type: str
+          post_task_cmd:
+            description:
+              - full os command which needs to run after patching task in db server vm
+            type: str
+  tags:
+    description:
+      - dict of tag name as key and tag value as value
+      - update allowed
+      - during update, given input will override existing tags
+    type: dict
+  update_credentials:
+    description:
+      - update credentials of vm in ndb
+      - this update should be done after the vm credentials are updated in the source cluster
+    type: list
+    elements: dict
+    suboptions:
+      username:
+        description:
+          - username
+        type: str
+        required: true
+      password:
+        description:
+          - password
+        type: str
+        required: true
+  delete_from_cluster:
+    description:
+      - set this during C(state)=absent to delete vm from source cluster
+    type: bool
+    default: False
+  delete_vgs:
+    description:
+      - set this during C(state)=absent to delete volume groups from source cluster
+    type: bool
+    default: False
+  delete_vm_snapshots:
+    description:
+      - set this during C(state)=absent to delete vm's snapshots from source cluster
+    type: bool
+    default: False
+  soft_remove:
+    description:
+      - set this during C(state)=absent to perform soft remove of database server vm
+    type: bool
+    default: False
+extends_documentation_fragment:
+  - nutanix.ncp.ntnx_ndb_base_module
+  - nutanix.ncp.ntnx_operations
+author:
+  - Prem Karat (@premkarat)
+  - Pradeepsingh Bhati (@bhati-pradeep)
+  - Alaa Bishtawi (@alaa-bish)
+"""
+
+EXAMPLES = r"""
+# Illustrative example only: profile, cluster and credential values below are
+# placeholders, not values from a real NDB setup.
+- name: create db server vm from a software profile
+  ntnx_ndb_db_server_vms:
+    wait: true
+    name: "ansible-created-vm"
+    desc: "ansible-created-db-server-vm"
+    software_profile:
+      name: "postgres-software-profile"
+    compute_profile:
+      name: "compute-profile"
+    network_profile:
+      name: "network-profile"
+    cluster:
+      name: "ndb-cluster"
+    password: "db_vm_password"
+    pub_ssh_key: "user-public-ssh-key"
+    time_zone: "UTC"
+    database_type: "postgres_database"
+  register: result
+"""
+RETURN = r"""
+response:
+  description: database server intent response
+  returned: always
+  type: dict
+  sample: {
+    "id": "7615993c-8455-4bc6-b562-8075a840991e",
+    "name": "test-setup-dnd",
+    "description": "DBServer for test-setup-dnd",
+    "dateCreated": "2023-02-24 07:42:55",
+    "dateModified": "2023-02-28 09:44:34",
+    "properties": [
+      {
+        "ref_id": "7615993c-8455-4bc6-b562-8075a840991e",
+        "name": "software_profile_version_id",
+        "value": "ab966132-7d7d-4418-b89d-dc814c2ef1b3",
+        "secure": false,
+        "description": null
+      },
+      {
+        "ref_id": "7615993c-8455-4bc6-b562-8075a840991e",
+        "name": "current_op_id",
+        "value": "32536509-0ca0-4475-a347-016c23855bfd",
+        "secure": false,
+        "description": null
+      },
+      {
+        "ref_id": "7615993c-8455-4bc6-b562-8075a840991e",
+        "name": "isEraCreated",
+        "value": "true",
+        "secure": false,
+        "description": null
+      },
+      {
+        "ref_id": "7615993c-8455-4bc6-b562-8075a840991e",
+        "name": "software_home",
+        "value": "/usr/pgsql-10.4",
+        "secure": false,
+        "description": null
+      },
+      {
+        "ref_id": "7615993c-8455-4bc6-b562-8075a840991e",
+        "name": "vm_ip_address_list",
+        "value": "xx.xx.xx.xx",
+        "secure": false,
"description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "working_dir", + "value": "/tmp", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "os_type", + "value": "linux", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "application_type", + "value": "postgres_database", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "application_version", + "value": "10.4", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "os_info", + "value": "Linux test-setup-dnd 5.10.0-1.el7.elrepo.x86_64 #1 SMP Sun Dec 13 18:34:48 EST 2020 x86_64 x86_64 x86_64 GNU/Linux\n", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "node_type", + "value": "database", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "era_base", + "value": "/opt/era_base", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "era_user", + "value": "era", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "compute_profile_id", + "value": "19b1241e-d4e0-411e-abfc-6639ba713d13", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "network_profile_id", + "value": "6cf4fe44-5030-41a5-a0cd-4e62a55cd85a", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "software_profile_id", + "value": "96b3c1a2-4427-41c1-87eb-a942c52247a2", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "vm_cpu_count", + "value": "1", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "vm_core_count", + "value": "1", + "secure": false, + "description": null + } + ], + "tags": [], + "eraCreated": true, + "dbserverClusterId": null, + "vmClusterName": "test-setup-dnd", + "vmClusterUuid": "1626600d-aa20-438e-94e8-3d3f0a5c948f", + "ipAddresses": [ + "10.44.78.125" + ], + "fqdns": null, + "macAddresses": [ + "" + ], + "type": "DBSERVER", + "status": "UP", + "clientId": "147e09d5-53fd-4da8-8a46-6c82d7ab5c6e", + "nxClusterId": "0a3b964f-8616-40b9-a564-99cf35f4b8d8", + "eraDriveId": "44dcffdf-235b-465f-b07f-ad253c26d93b", + "eraVersion": "2.5.1", + "vmTimeZone": "UTC", + "vmInfo": { + "secureInfo": null, + "info": null, + "deregisterInfo": null, + "osType": null, + "osVersion": null, + "distribution": null, + "networkInfo": [ + { + "vlanName": "vlan.sds", + "vlanUuid": "61213511-6383-4a38-9ac8-4a552c0e5865", + "vlanType": "Static", + } + ] + }, + "info": null, + "metric": null, + "clustered": false, + "requestedVersion": null, + "is_server_driven": false, + "associated_time_machine_id": null, + "time_machine_info": null, + "eraDrive": null, + "databases": null, + "clones": null, + "accessKey": null, + "softwareInstallations": null, + "protectionDomainId": "ef185e83-fc47-4111-bff5-3e5f003bb610", + "protectionDomain": null, + "queryCount": 0, + "databaseType": "postgres_database", + "dbserverInValidEaState": true, + "workingDirectory": "/tmp", + "validDiagnosticBundleState": true, + "windowsDBServer": false, + 
"associatedTimeMachineIds": null, + "accessKeyId": "ed3c5a82-c5c1-4728-85e1-d38cba63c107" +} +uuid: + description: created db server UUID + returned: always + type: str + sample: "be524e70-60ad-4a8c-a0ee-8d72f954d7e6" +""" + +import time # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.db_server_vm import DBServerVM # noqa: E402 +from ..module_utils.ndb.maintenance_window import ( # noqa: E402 + AutomatedPatchingSpec, + MaintenanceWindow, +) +from ..module_utils.ndb.operations import Operation # noqa: E402 +from ..module_utils.ndb.tags import Tag # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + + +def get_module_spec(): + mutually_exclusive = [("name", "uuid")] + entity_by_spec = dict(name=dict(type="str"), uuid=dict(type="str")) + + automated_patching = deepcopy( + AutomatedPatchingSpec.automated_patching_argument_spec + ) + + software_profile = dict( + name=dict(type="str"), uuid=dict(type="str"), version_uuid=dict(type="str") + ) + time_machine = dict( + name=dict(type="str", required=False), + uuid=dict(type="str", required=False), + snapshot_uuid=dict(type="str", required=False), + ) + credential = dict( + username=dict(type="str", required=True), + password=dict(type="str", required=True, no_log=True), + ) + module_args = dict( + uuid=dict(type="str", required=False), + name=dict(type="str", required=False), + desc=dict(type="str", required=False), + reset_name_in_ntnx_cluster=dict(type="bool", default=False, required=False), + reset_desc_in_ntnx_cluster=dict(type="bool", default=False, required=False), + cluster=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + network_profile=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + compute_profile=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + software_profile=dict( + type="dict", + options=software_profile, + mutually_exclusive=mutually_exclusive, + required=False, + ), + time_machine=dict( + type="dict", + options=time_machine, + mutually_exclusive=mutually_exclusive, + required=False, + ), + password=dict(type="str", required=False, no_log=True), + pub_ssh_key=dict(type="str", required=False, no_log=True), + time_zone=dict(type="str", default="Asia/Calcutta", required=False), + database_type=dict(type="str", choices=["postgres_database"], required=False), + tags=dict(type="dict", required=False), + update_credentials=dict( + type="list", elements="dict", options=credential, required=False + ), + automated_patching=dict( + type="dict", options=automated_patching, required=False + ), + delete_from_cluster=dict(type="bool", default=False, required=False), + delete_vgs=dict(type="bool", default=False, required=False), + delete_vm_snapshots=dict(type="bool", default=False, required=False), + soft_remove=dict(type="bool", default=False, required=False), + ) + return module_args + + +def get_provision_spec(module, result): + db_servers = DBServerVM(module) + + default_spec = db_servers.get_default_spec_for_provision() + spec, err = db_servers.get_spec(old_spec=default_spec, provision_new_server=True) + + if err: + result["error"] = err + module.fail_json("Failed getting DB server vm create spec", **result) + + # populate tags related spec + if module.params.get("tags"): + tags = Tag(module) + spec, err = 
tags.get_spec( + spec, associate_to_entity=True, type="DATABASE_SERVER" + ) + if err: + result["error"] = err + err_msg = "Failed getting spec for tags for new db server vm" + module.fail_json(msg=err_msg, **result) + + # configure automated patching + if module.params.get("automated_patching"): + mw = MaintenanceWindow(module) + mw_spec, err = mw.get_spec(configure_automated_patching=True) + if err: + result["error"] = err + err_msg = "Failed getting spec for automated patching for new db server vm" + module.fail_json(msg=err_msg, **result) + spec["maintenanceTasks"] = mw_spec + + return spec + + +def create_db_server(module, result): + db_servers = DBServerVM(module) + + spec = get_provision_spec(module, result) + if module.check_mode: + result["response"] = spec + return + + resp = db_servers.provision(data=spec) + result["response"] = resp + result["uuid"] = resp["entityId"] + db_uuid = resp["entityId"] + + if module.params.get("wait"): + ops_uuid = resp["operationId"] + operations = Operation(module) + time.sleep(5) # to get operation ID functional + operations.wait_for_completion(ops_uuid) + resp = db_servers.read(db_uuid) + db_servers.format_response(resp) + result["response"] = resp + + result["changed"] = True + + +def check_idempotency(old_spec, new_spec): + + # check for arguments + args = ["name", "description"] + for arg in args: + if old_spec[arg] != new_spec[arg]: + return False + + # check for resets + args = ["resetDescriptionInNxCluster", "resetNameInNxCluster", "resetCredential"] + for arg in args: + if new_spec.get(arg, False): + return False + + # check for new tags + if len(old_spec["tags"]) != len(new_spec["tags"]): + return False + + old_tag_values = {} + new_tag_values = {} + for i in range(len(old_spec["tags"])): + old_tag_values[old_spec["tags"][i]["tagName"]] = old_spec["tags"][i]["value"] + new_tag_values[new_spec["tags"][i]["tagName"]] = new_spec["tags"][i]["value"] + + if old_tag_values != new_tag_values: + return False + + return True + + +def update_db_server(module, result): + db_servers = DBServerVM(module) + + uuid = module.params.get("uuid") + if not uuid: + module.fail_json("'uuid' is required for updating db server vm") + + db_server = db_servers.read(uuid=uuid) + update_spec = db_servers.get_default_spec_for_update(override=db_server) + update_spec, err = db_servers.get_spec(old_spec=update_spec, update=True) + if err: + result["error"] = err + module.fail_json("Failed getting db server vm update spec", **result) + + # populate tags related spec + if module.params.get("tags"): + tags = Tag(module) + update_spec, err = tags.get_spec( + update_spec, associate_to_entity=True, type="DATABASE_SERVER" + ) + if err: + result["error"] = err + err_msg = "Failed getting spec for tags for db server vm update" + module.fail_json(msg=err_msg, **result) + update_spec["resetTags"] = True + + if module.check_mode: + result["response"] = update_spec + return + + if check_idempotency(old_spec=db_server, new_spec=update_spec): + result["skipped"] = True + module.exit_json(msg="Nothing to change.") + + resp = db_servers.update(data=update_spec, uuid=uuid) + db_servers.format_response(resp) + result["response"] = resp + result["uuid"] = uuid + result["changed"] = True + + +def delete_db_server(module, result): + db_servers = DBServerVM(module) + + uuid = module.params.get("uuid") + if not uuid: + module.fail_json("'uuid' is required for deleting db server vm") + + spec = db_servers.get_default_delete_spec() + spec, err = db_servers.get_spec(old_spec=spec, delete=True) + 
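# get_spec with delete=True is expected to fold the delete_from_cluster,
+    # delete_vgs, delete_vm_snapshots and soft_remove params into this
+    # delete spec; "remove" (unregister only) is set below as the inverse
+    # of "delete"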
+    if err:
+        result["error"] = err
+        module.fail_json("Failed getting db server vm delete spec", **result)
+
+    spec["remove"] = not spec["delete"]
+
+    if module.check_mode:
+        result["response"] = spec
+        return
+
+    resp = db_servers.delete(data=spec, uuid=uuid)
+    result["response"] = resp
+
+    if module.params.get("wait") and resp.get("operationId"):
+        ops_uuid = resp["operationId"]
+        operations = Operation(module)
+        time.sleep(5)  # to get operation ID functional
+        resp = operations.wait_for_completion(ops_uuid, delay=5)
+        result["response"] = resp
+
+    result["changed"] = True
+
+
+def run_module():
+    mutually_exclusive_list = [
+        ("uuid", "database_type"),
+        ("uuid", "time_zone"),
+        ("uuid", "pub_ssh_key"),
+        ("uuid", "password"),
+        ("uuid", "time_machine"),
+        ("uuid", "cluster"),
+        ("uuid", "network_profile"),
+        ("uuid", "software_profile"),
+        ("uuid", "compute_profile"),
+    ]
+    module = NdbBaseModule(
+        argument_spec=get_module_spec(),
+        mutually_exclusive=mutually_exclusive_list,
+        required_if=[
+            ("state", "present", ("name", "uuid"), True),
+            ("state", "absent", ("uuid",)),
+        ],
+        supports_check_mode=True,
+    )
+    remove_param_with_none_value(module.params)
+    result = {"changed": False, "error": None, "response": None, "uuid": None}
+    if module.params["state"] == "present":
+        if module.params.get("uuid"):
+            update_db_server(module, result)
+        else:
+            create_db_server(module, result)
+    else:
+        delete_db_server(module, result)
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/ntnx_ndb_db_servers_info.py b/plugins/modules/ntnx_ndb_db_servers_info.py
index 753d8bd54..fd18f8f20 100644
--- a/plugins/modules/ntnx_ndb_db_servers_info.py
+++ b/plugins/modules/ntnx_ndb_db_servers_info.py
@@ -11,23 +11,67 @@
 ---
 module: ntnx_ndb_db_servers_info
 short_description: info module for ndb db server vms info
-version_added: 1.8.0-beta.1
-description: 'Get database server info'
+version_added: 1.8.0
+description:
+  - Get database server info
+  - Module will fetch all entities if no spec is given
 options:
   name:
     description:
-      - server name
+      - database server vm name
     type: str
   uuid:
     description:
-      - server id
+      - database server vm uuid
     type: str
   server_ip:
     description:
       - db server vm ip
     type: str
+  filters:
+    description:
+      - query filters
+    type: dict
+    suboptions:
+      detailed:
+        description:
+          - flag to get detailed response
+        type: bool
+      load_clones:
+        description:
+          - flag to include clones in response
+        type: bool
+      load_databases:
+        description:
+          - flag to include databases in response
+        type: bool
+      load_dbserver_cluster:
+        description:
+          - flag to include db server cluster details in response
+        type: bool
+      load_metrics:
+        description:
+          - flag to include metrics
+        type: bool
+      curator:
+        description:
+          - get entity if it satisfies the query criteria, irrespective of status
+        type: bool
+      value:
+        description:
+          - value as per C(value_type)
+        type: str
+      value_type:
+        description:
+          - type of C(value)
+        type: str
+        choices: ["ip", "name", "vm-cluster-name", "vm-cluster-uuid", "dbserver-cluster-id", "nx-cluster-id", "fqdn"]
+      time_zone:
+        description:
+          - timezone related to C(pitr_timestamp)
+        type: str
 extends_documentation_fragment:
-  - nutanix.ncp.ntnx_ndb_base_module
+  - nutanix.ncp.ntnx_ndb_info_base_module
 author:
   - Prem Karat (@premkarat)
   - Gevorg Khachatryan (@Gevorg-Khachatryan-97)
@@ -339,28 +383,65 @@
 """
 
 from ..module_utils.ndb.base_info_module import NdbBaseInfoModule  # noqa: E402
-from ..module_utils.ndb.db_servers import DBServers  # noqa: E402
+from ..module_utils.ndb.db_server_vm import DBServerVM  # noqa: E402
+from ..module_utils.utils import format_filters_map  # noqa: E402
 
 
 def get_module_spec():
+    filters_spec = dict(
+        detailed=dict(type="bool"),
+        load_clones=dict(type="bool"),
+        load_databases=dict(type="bool"),
+        load_dbserver_cluster=dict(type="bool"),
+        load_metrics=dict(type="bool"),
+        curator=dict(type="bool"),
+        value=dict(type="str"),
+        value_type=dict(
+            type="str",
+            choices=[
+                "ip",
+                "name",
+                "vm-cluster-name",
+                "vm-cluster-uuid",
+                "dbserver-cluster-id",
+                "nx-cluster-id",
+                "fqdn",
+            ],
+        ),
+        time_zone=dict(type="str"),
+    )
+
     module_args = dict(
         name=dict(type="str"),
         uuid=dict(type="str"),
         server_ip=dict(type="str"),
+        filters=dict(
+            type="dict",
+            options=filters_spec,
+        ),
     )
 
     return module_args
 
 
 def get_db_server(module, result):
-    db_server = DBServers(module)
+    db_server = DBServerVM(module)
+
+    query_params = module.params.get("filters")
+    query_params = format_filters_map(query_params)
+
     if module.params.get("uuid"):
-        resp, err = db_server.get_db_server(uuid=module.params["uuid"])
+        resp, err = db_server.get_db_server(
+            uuid=module.params["uuid"], query=query_params
+        )
     elif module.params.get("name"):
-        resp, err = db_server.get_db_server(name=module.params["name"])
+        resp, err = db_server.get_db_server(
+            name=module.params["name"], query=query_params
+        )
     else:
-        resp, err = db_server.get_db_server(ip=module.params["server_ip"])
+        resp, err = db_server.get_db_server(
+            ip=module.params["server_ip"], query=query_params
+        )
 
     if err:
         result["error"] = err
@@ -370,9 +451,11 @@
 
 
 def get_db_servers(module, result):
-    db_server = DBServers(module)
+    db_server = DBServerVM(module)
+    query_params = module.params.get("filters")
+    query_params = format_filters_map(query_params)
 
-    resp = db_server.read()
+    resp = db_server.read(query=query_params)
 
     result["response"] = resp
 
diff --git a/plugins/modules/ntnx_ndb_linked_databases.py b/plugins/modules/ntnx_ndb_linked_databases.py
new file mode 100644
index 000000000..5dbabbaa6
--- /dev/null
+++ b/plugins/modules/ntnx_ndb_linked_databases.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Prem Karat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: ntnx_ndb_linked_databases
+short_description: module to manage linked databases of a database instance
+version_added: 1.8.0
+description: module to manage linked databases of a database instance
+options:
+  state:
+    description:
+      - when C(state)=present, it will add given databases to the database instance
+      - when C(state)=absent, it will remove the linked database with given C(database_uuid)
+  db_instance_uuid:
+    description:
+      - database instance uuid
+    type: str
+    required: true
+  database_uuid:
+    description:
+      - linked database uuid
+      - should be used with C(state)=absent, to delete linked database
+    type: str
+  databases:
+    description:
+      - list of names of databases to be added to the database instance
+    type: list
+    elements: str
+extends_documentation_fragment:
+  - nutanix.ncp.ntnx_ndb_base_module
+  - nutanix.ncp.ntnx_operations
+author:
+  - Prem Karat (@premkarat)
+  - Pradeepsingh Bhati (@bhati-pradeep)
+  - Alaa Bishtawi (@alaa-bish)
+"""
+
+EXAMPLES = r"""
+- name: add databases in database instance
+  ntnx_ndb_linked_databases:
+    
db_instance_uuid: "{{db_uuid}}" + databases: + - test1 + - test2 + register: result + +- name: remove linked databases from database instance + ntnx_ndb_linked_databases: + state: "absent" + db_instance_uuid: "{{db_uuid}}" + database_uuid: "{{linked_databases.test1}}" + register: result +""" +RETURN = r""" +response: + description: list of linked databases in database instance + returned: always + type: dict + sample: [ + { + "databaseName": "template1", + "databaseStatus": "READY", + "dateCreated": "2023-02-24 08:07:01", + "dateModified": "2023-02-24 08:07:01", + "description": null, + "id": "d013a63f-c9ba-4533-989d-57e57d8a4d6f", + "info": { + "info": { + "created_by": "system" + }, + "secureInfo": null + }, + "metadata": null, + "metric": null, + "name": "template1", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "parentDatabaseId": "50b8ce26-62ba-443b-a6b6-8739373f81eb", + "parentDatabaseName": null, + "parentDatabaseType": null, + "parentLinkedDatabaseId": null, + "snapshotId": null, + "status": "READY", + "tags": [], + "timeZone": null + }, + { + "databaseName": "template0", + "databaseStatus": "READY", + "dateCreated": "2023-02-24 08:07:01", + "dateModified": "2023-02-24 08:07:01", + "description": null, + "id": "c18419fd-df31-4e54-b35a-ee004c0faafb", + "info": { + "info": { + "created_by": "system" + }, + "secureInfo": null + }, + "metadata": null, + "metric": null, + "name": "template0", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "parentDatabaseId": "50b8ce26-62ba-443b-a6b6-8739373f81eb", + "parentDatabaseName": null, + "parentDatabaseType": null, + "parentLinkedDatabaseId": null, + "snapshotId": null, + "status": "READY", + "tags": [], + "timeZone": null + }, + { + "databaseName": "prad", + "databaseStatus": "READY", + "dateCreated": "2023-02-24 08:07:01", + "dateModified": "2023-02-24 08:07:01", + "description": null, + "id": "779f1f6a-502d-4ffd-9030-d21447c5ca3d", + "info": { + "info": { + "created_by": "user" + }, + "secureInfo": null + }, + "metadata": null, + "metric": null, + "name": "prad", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "parentDatabaseId": "50b8ce26-62ba-443b-a6b6-8739373f81eb", + "parentDatabaseName": null, + "parentDatabaseType": null, + "parentLinkedDatabaseId": null, + "snapshotId": null, + "status": "READY", + "tags": [], + "timeZone": null + }, + { + "databaseName": "postgres", + "databaseStatus": "READY", + "dateCreated": "2023-02-24 08:07:01", + "dateModified": "2023-02-24 08:07:01", + "description": null, + "id": "6e3733cf-2994-49d2-945c-c1873564be97", + "info": { + "info": { + "created_by": "system" + }, + "secureInfo": null + }, + "metadata": null, + "metric": null, + "name": "postgres", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "parentDatabaseId": "50b8ce26-62ba-443b-a6b6-8739373f81eb", + "parentDatabaseName": null, + "parentDatabaseType": null, + "parentLinkedDatabaseId": null, + "snapshotId": null, + "status": "READY", + "tags": [], + "timeZone": null + }, + { + "databaseName": "ansible1-new", + "databaseStatus": "READY", + "dateCreated": "2023-02-28 09:53:27", + "dateModified": "2023-02-28 09:53:51", + "description": null, + "id": "742e41b9-1766-47ef-9c2c-97aadeac8c0f", + "info": { + "info": { + "created_by": "user" + }, + "secureInfo": null + }, + "metadata": null, + "metric": null, + "name": "ansible1-new", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "parentDatabaseId": "50b8ce26-62ba-443b-a6b6-8739373f81eb", + "parentDatabaseName": null, + "parentDatabaseType": null, + 
"parentLinkedDatabaseId": null, + "snapshotId": null, + "status": "READY", + "tags": [], + "timeZone": null + }, + { + "databaseName": "ansible2-new", + "databaseStatus": "READY", + "dateCreated": "2023-02-28 09:53:27", + "dateModified": "2023-02-28 09:53:51", + "description": null, + "id": "04d4b431-e75f-4a14-86ad-674f447d6aec", + "info": { + "info": { + "created_by": "user" + }, + "secureInfo": null + }, + "metadata": null, + "metric": null, + "name": "ansible2-new", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "parentDatabaseId": "50b8ce26-62ba-443b-a6b6-8739373f81eb", + "parentDatabaseName": null, + "parentDatabaseType": null, + "parentLinkedDatabaseId": null, + "snapshotId": null, + "status": "READY", + "tags": [], + "timeZone": null + } + ] +database_instance_uuid: + description: database instance uuid + returned: always + type: str + sample: "be524e70-60ad-4a8c-a0ee-8d72f954d7e6" +""" +import time # noqa: E402 + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.database_instances import DatabaseInstance # noqa: E402 +from ..module_utils.ndb.operations import Operation # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + + +def get_module_spec(): + + module_args = dict( + db_instance_uuid=dict(type="str", required=True), + database_uuid=dict(type="str", required=False), + databases=dict(type="list", elements="str", required=False), + ) + return module_args + + +def add_database(module, result): + instance_uuid = module.params.get("db_instance_uuid") + if not instance_uuid: + err_msg = "db_instance_uuid is required field for adding databases to database instance" + return module.fail_json(msg=err_msg, **result) + result["db_instance_uuid"] = instance_uuid + + _databases = DatabaseInstance(module) + databases = module.params.get("databases") + if not databases: + return module.exit_json(msg="No database to add", **result) + + spec = _databases.get_add_database_spec(databases) + if module.check_mode: + result["response"] = spec + return + + resp = _databases.add_databases(instance_uuid, spec) + result["response"] = resp + + if module.params.get("wait"): + ops_uuid = resp["operationId"] + time.sleep(3) # to get operation ID functional + operations = Operation(module) + operations.wait_for_completion(ops_uuid, delay=5) + resp = _databases.read(uuid=instance_uuid) + result["response"] = resp.get("linkedDatabases", []) + + result["changed"] = True + + +def remove_database(module, result): + instance_uuid = module.params.get("db_instance_uuid") + database_uuid = module.params.get("database_uuid") + if not database_uuid or not instance_uuid: + err_msg = "database_uuid and instance_uuid are required fields for deleting database from database instance" + module.fail_json(msg=err_msg, **result) + + _databases = DatabaseInstance(module) + resp = _databases.remove_linked_database( + linked_database_uuid=database_uuid, database_instance_uuid=instance_uuid + ) + result["response"] = resp + result["db_instance_uuid"] = instance_uuid + + if module.params.get("wait"): + ops_uuid = resp["operationId"] + operations = Operation(module) + time.sleep(3) # to get ops ID functional + operations.wait_for_completion(ops_uuid, delay=5) + resp = _databases.read(uuid=instance_uuid) + result["response"] = resp.get("linkedDatabases", []) + + result["changed"] = True + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + mutually_exclusive=[("databases", 
"database_uuid")], + ) + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "db_instance_uuid": None, + } + + if module.params["state"] == "present": + add_database(module, result) + else: + remove_database(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_maintenance_tasks.py b/plugins/modules/ntnx_ndb_maintenance_tasks.py new file mode 100644 index 000000000..1b6411f0a --- /dev/null +++ b/plugins/modules/ntnx_ndb_maintenance_tasks.py @@ -0,0 +1,400 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +DOCUMENTATION = r""" +--- +module: ntnx_ndb_maintenance_tasks +short_description: module to add and remove maintenance related tasks +version_added: 1.8.0 +description: module to add and remove maintenance related tasks +options: + db_server_vms: + description: + - list of database server vms to which maintenance tasks needs to be added + type: list + elements: dict + suboptions: + name: + description: + - name of db server vm + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of db server vm + - mutually exclusive with C(name) + type: str + + db_server_clusters: + description: + - list of database server clusters to which maintenance tasks needs to be added + type: list + elements: dict + suboptions: + name: + description: + - name of db server cluster + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of db server cluster + - mutually exclusive with C(name) + type: str + maintenance_window: + description: + - maintenance window details + type: dict + required: true + suboptions: + name: + description: + - name of maintenance window + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of maintenance window + - mutually exclusive with C(name) + type: str + tasks: + description: + - list of maintenance pre-post tasks + type: list + elements: dict + suboptions: + type: + description: + - type of patching + type: str + choices: ["OS_PATCHING", "DB_PATCHING"] + pre_task_cmd: + description: + - full os command which needs to run before patching task in db server vm + type: str + post_task_cmd: + description: + - full os command which needs to run after patching task in db server vm + type: str + +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_operations +author: + - Prem Karat (@premkarat) + - Pradeepsingh Bhati (@bhati-pradeep) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: removing existing maintenance tasks for db server vm + ntnx_ndb_maintenance_tasks: + db_server_vms: + - uuid: "{{db_server_uuid}}" + maintenance_window: + uuid: "{{maintenance.window_uuid}}" + tasks: [] + register: result + +- name: Add maitenance window task for vm + ntnx_ndb_maintenance_tasks: + db_server_vms: + - name: "{{vm1_name_updated}}" + maintenance_window: + name: "{{maintenance.window_name}}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "python3 script.py" + post_task_cmd: "python3 script.py" + - type: "DB_PATCHING" + pre_task_cmd: "python3 script.py" + post_task_cmd: "python3 script.py" + register: result +""" +RETURN = r""" +response: + description: maintenance window response with 
associated tasks + returned: always + type: dict + sample: { + "accessLevel": null, + "dateCreated": "2023-02-25 06:34:44", + "dateModified": "2023-02-28 00:00:00", + "description": "anisble-created-window", + "entityTaskAssoc": [ + { + "accessLevel": null, + "dateCreated": "2023-02-28 10:39:23", + "dateModified": "2023-02-28 10:39:23", + "description": null, + "entity": { + "accessLevel": null, + "clones": [ + { + "id": "4b86551d-168f-405b-a888-89ac9082bdff", + "status": "READY" + } + ], + "clusterDescription": null, + "clusterEraCreated": false, + "clusterOwner": null, + "clusterStatus": null, + "databaseType": "postgres_database", + "databases": [], + "dateCreated": null, + "dateModified": null, + "dbClusterId": null, + "dbClusterName": null, + "description": "vm_desc", + "eraCreated": true, + "id": "e748bcb4-a2bb-4b6b-bb9e-1cbfe7ff0e30", + "inUse": 1, + "ipAddresses": [ + "10.44.78.8" + ], + "maintenanceWindowId": "69916cc2-eb2f-4198-984a-e8a4e507d680", + "maintenanceWindowName": "OACrBshrexJV1", + "name": "postgress_server_new1", + "nxClusterId": "0a3b964f-8616-40b9-a564-99cf35f4b8d8", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "properties": [ + { + "name": "compute_profile_id", + "value": "19b1241e-d4e0-411e-abfc-6639ba713d13" + }, + { + "name": "software_profile_id", + "value": "96b3c1a2-4427-41c1-87eb-a942c52247a2" + }, + { + "name": "software_profile_version_id", + "value": "ab966132-7d7d-4418-b89d-dc814c2ef1b3" + }, + { + "name": "network_profile_id", + "value": "6cf4fe44-5030-41a5-a0cd-4e62a55cd85a" + }, + { + "name": "associated_time_machine_id", + "value": "2ec7d4a9-c6e6-4f51-a4bd-1af7f8ee8ca8" + } + ], + "status": "UP", + "tags": [] + }, + "entityId": "e748bcb4-a2bb-4b6b-bb9e-1cbfe7ff0e30", + "entityType": "ERA_DBSERVER", + "id": "889aaa2f-bc84-4202-86ec-5c7bc54a260f", + "maintenanceWindowId": "69916cc2-eb2f-4198-984a-e8a4e507d680", + "maintenanceWindowOwnerId": null, + "name": null, + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "payload": { + "prePostCommand": { + "postCommand": "os_post", + "preCommand": "os_pre" + } + }, + "properties": null, + "status": "ACTIVE", + "tags": null, + "taskType": "OS_PATCHING" + }, + { + "accessLevel": null, + "dateCreated": "2023-02-28 10:39:23", + "dateModified": "2023-02-28 10:39:23", + "description": null, + "entity": { + "accessLevel": null, + "clones": [ + { + "id": "4b86551d-168f-405b-a888-89ac9082bdff", + "status": "READY" + } + ], + "clusterDescription": null, + "clusterEraCreated": false, + "clusterOwner": null, + "clusterStatus": null, + "databaseType": "postgres_database", + "databases": [], + "dateCreated": null, + "dateModified": null, + "dbClusterId": null, + "dbClusterName": null, + "description": "vm_desc", + "eraCreated": true, + "id": "e748bcb4-a2bb-4b6b-bb9e-1cbfe7ff0e30", + "inUse": 1, + "ipAddresses": [ + "10.44.78.8" + ], + "maintenanceWindowId": "69916cc2-eb2f-4198-984a-e8a4e507d680", + "maintenanceWindowName": "OACrBshrexJV1", + "name": "postgress_server_new1", + "nxClusterId": "0a3b964f-8616-40b9-a564-99cf35f4b8d8", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "properties": [ + { + "name": "compute_profile_id", + "value": "19b1241e-d4e0-411e-abfc-6639ba713d13" + }, + { + "name": "software_profile_id", + "value": "96b3c1a2-4427-41c1-87eb-a942c52247a2" + }, + { + "name": "software_profile_version_id", + "value": "ab966132-7d7d-4418-b89d-dc814c2ef1b3" + }, + { + "name": "network_profile_id", + "value": "6cf4fe44-5030-41a5-a0cd-4e62a55cd85a" + }, + { + "name": "associated_time_machine_id", 
+ "value": "2ec7d4a9-c6e6-4f51-a4bd-1af7f8ee8ca8" + } + ], + "status": "UP", + "tags": [] + }, + "entityId": "e748bcb4-a2bb-4b6b-bb9e-1cbfe7ff0e30", + "entityType": "ERA_DBSERVER", + "id": "3b842672-61dd-4635-857c-606e161fed1d", + "maintenanceWindowId": "69916cc2-eb2f-4198-984a-e8a4e507d680", + "maintenanceWindowOwnerId": null, + "name": null, + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "payload": { + "prePostCommand": { + "postCommand": "db_post", + "preCommand": "db_pre" + } + }, + "properties": null, + "status": "ACTIVE", + "tags": null, + "taskType": "DB_PATCHING" + } + ], + "id": "69916cc2-eb2f-4198-984a-e8a4e507d680", + "name": "OACrBshrexJV1", + "nextRunTime": "2023-02-28 11:00:00", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "properties": null, + "schedule": { + "dayOfWeek": "TUESDAY", + "duration": 2, + "hour": 11, + "minute": 0, + "recurrence": "WEEKLY", + "startTime": "11:00:00", + "threshold": null, + "timeZone": "UTC", + "weekOfMonth": null + }, + "status": "SCHEDULED", + "tags": null, + "timezone": null + } +uuid: + description: maintenance window uuid + returned: always + type: str + sample: "be524e70-60ad-4a8c-a0ee-8d72f954d7e6" +""" + +from copy import deepcopy # noqa: E402 + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.maintenance_window import ( # noqa: E402 + AutomatedPatchingSpec, + MaintenanceWindow, +) +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + + +def get_module_spec(): + mutually_exclusive = [("name", "uuid")] + entity_by_spec = dict(name=dict(type="str"), uuid=dict(type="str")) + + automated_patching = deepcopy( + AutomatedPatchingSpec.automated_patching_argument_spec + ) + module_args = dict( + db_server_vms=dict( + type="list", + elements="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + db_server_clusters=dict( + type="list", + elements="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + ) + module_args.update(automated_patching) + + # maintenance window ID is always required for updating maintenance tasks + module_args["maintenance_window"]["required"] = True + return module_args + + +def update_maintenance_tasks(module, result): + maintenance_window = MaintenanceWindow(module) + + spec, err = maintenance_window.get_spec(configure_automated_patching=True) + if err: + result["error"] = err + err_msg = "Failed getting spec for updating maintenance tasks" + module.fail_json(msg=err_msg, **result) + + uuid = spec.get("maintenanceWindowId") + + if not uuid: + return module.fail_json(msg="Failed fetching maintenance window uuid") + + result["uuid"] = uuid + if module.check_mode: + result["response"] = spec + return + + maintenance_window.update_tasks(data=spec) + + query = {"load-task-associations": True, "load-entities": True} + resp = maintenance_window.read(uuid=uuid, query=query) + result["response"] = resp + result["changed"] = True + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("db_server_vms", "db_server_clusters"), True) + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None, "uuid": None} + update_maintenance_tasks(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_maintenance_window.py 
b/plugins/modules/ntnx_ndb_maintenance_window.py
new file mode 100644
index 000000000..f90e8fb96
--- /dev/null
+++ b/plugins/modules/ntnx_ndb_maintenance_window.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Prem Karat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+DOCUMENTATION = r"""
+---
+module: ntnx_ndb_maintenance_window
+short_description: module to create, update and delete maintenance window
+version_added: 1.8.0
+description: module to create, update and delete maintenance window
+options:
+  name:
+    description:
+      - name of maintenance window
+    type: str
+  uuid:
+    description:
+      - uuid of maintenance window
+      - should be used for update or delete
+    type: str
+  desc:
+    description:
+      - description of maintenance window
+    type: str
+  schedule:
+    description:
+      - schedule of maintenance
+    type: dict
+    suboptions:
+      recurrence:
+        description:
+          - type of recurrence
+        type: str
+        choices: ["weekly", "monthly"]
+      duration:
+        description:
+          - duration of window in hours
+        type: int
+      start_time:
+        description:
+          - start time of maintenance in format 'hh:mm:ss'
+        type: str
+      timezone:
+        description:
+          - time zone related to C(start_time)
+          - required with start_time
+        type: str
+      week_of_month:
+        description:
+          - week of month for maintenance
+        type: str
+      day_of_week:
+        description:
+          - day of week for maintenance
+        type: str
+
+extends_documentation_fragment:
+  - nutanix.ncp.ntnx_ndb_base_module
+  - nutanix.ncp.ntnx_operations
+author:
+  - Prem Karat (@premkarat)
+  - Pradeepsingh Bhati (@bhati-pradeep)
+  - Alaa Bishtawi (@alaa-bish)
+"""
+
+EXAMPLES = r"""
+- name: create window with weekly schedule
+  ntnx_ndb_maintenance_window:
+    name: "{{window1_name}}"
+    desc: "ansible-created-window"
+    schedule:
+      recurrence: "weekly"
+      duration: 2
+      start_time: "11:00:00"
+      day_of_week: "tuesday"
+      timezone: "UTC"
+  register: result
+
+- name: create window with monthly schedule
+  ntnx_ndb_maintenance_window:
+    name: "{{window2_name}}"
+    desc: "ansible-created-window"
+    schedule:
+      recurrence: "monthly"
+      duration: 2
+      start_time: "11:00:00"
+      day_of_week: "tuesday"
+      week_of_month: 2
+      timezone: "UTC"
+  register: result
+"""
+RETURN = r"""
+response:
+  description: maintenance window response with associated tasks
+  returned: always
+  type: dict
+  sample: {
+    "id": "3c8704e7-e1a7-49f9-9943-a92090f8d098",
+    "name": "test-check",
+    "description": "",
+    "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73",
+    "dateCreated": "2023-02-28 10:50:16",
+    "dateModified": "2023-02-28 10:50:16",
+    "accessLevel": null,
+    "properties": null,
+    "tags": null,
+    "schedule": {
+      "startTime": "10:50:02",
+      "recurrence": "MONTHLY",
+      "threshold": null,
+      "hour": 10,
+      "minute": 50,
+      "dayOfWeek": "TUESDAY",
+      "weekOfMonth": 4,
+      "duration": 2,
+      "timeZone": "UTC"
+    },
+    "status": "ACTIVE",
+    "nextRunTime": "2023-03-28 10:50:00",
+    "entityTaskAssoc": null,
+    "timezone": null
+  }
+uuid:
+  description: maintenance window uuid
+  returned: always
+  type: str
+  sample: "be524e70-60ad-4a8c-a0ee-8d72f954d7e6"
+
+"""
+
+from ..module_utils.ndb.base_module import NdbBaseModule  # noqa: E402
+from ..module_utils.ndb.maintenance_window import MaintenanceWindow  # noqa: E402
+from ..module_utils.utils import remove_param_with_none_value  # noqa: E402
+
+
+def get_module_spec():
+    schedule = dict(
+        recurrence=dict(type="str", choices=["weekly",
"monthly"], required=False), + duration=dict(type="int", required=False), # in hrs + start_time=dict(type="str", required=False), # in 24hrs format in HH:MM:SS + timezone=dict(type="str", required=False), + week_of_month=dict(type="str", required=False), + day_of_week=dict(type="str", required=False), + ) + module_args = dict( + uuid=dict(type="str", required=False), + name=dict(type="str", required=False), + desc=dict(type="str", required=False), + schedule=dict( + type="dict", + options=schedule, + required_together=[("start_time", "timezone")], + required=False, + ), + ) + return module_args + + +def create_window(module, result): + maintenance_window = MaintenanceWindow(module) + + spec, err = maintenance_window.get_spec() + if err: + result["error"] = err + err_msg = "Failed getting spec for new maintenance window" + module.fail_json(msg=err_msg, **result) + + if module.check_mode: + result["response"] = spec + return + + resp = maintenance_window.create(spec) + result["response"] = resp + result["uuid"] = resp.get("id") + result["changed"] = True + + +def check_idempotency(old_spec, new_spec): + + args = ["name", "description"] + for arg in args: + if old_spec.get(arg, "") != new_spec.get(arg, ""): + return False + + # check for schedule changes + args = ["recurrence", "dayOfWeek", "weekOfMonth", "duration", "startTime"] + for arg in args: + if old_spec.get("schedule", {}).get(arg, "") != new_spec.get( + "schedule", {} + ).get(arg, ""): + return False + return True + + +def update_window(module, result): + _maintenance_window = MaintenanceWindow(module) + + uuid = module.params.get("uuid") + if not uuid: + module.fail_json(msg="uuid is required field for update", **result) + + maintenance_window = _maintenance_window.read(uuid=uuid) + default_spec = _maintenance_window.get_default_update_spec( + override_spec=maintenance_window + ) + spec, err = _maintenance_window.get_spec(old_spec=default_spec) + if err: + result["error"] = err + err_msg = "Failed getting spec for updating maintenance window" + module.fail_json(msg=err_msg, **result) + + if module.check_mode: + result["response"] = spec + return + + # defining start_time will skip idempotency checks + if check_idempotency(old_spec=maintenance_window, new_spec=spec): + result["skipped"] = True + module.exit_json(msg="Nothing to change.") + + resp = _maintenance_window.update(uuid=uuid, data=spec) + result["response"] = resp + result["uuid"] = uuid + result["changed"] = True + + +def delete_window(module, result): + _maintenance_window = MaintenanceWindow(module) + + uuid = module.params.get("uuid") + if not uuid: + module.fail_json(msg="uuid is required field for delete", **result) + + resp = _maintenance_window.delete(uuid=uuid, data={}) + result["response"] = resp + result["changed"] = True + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None, "uuid": None} + if module.params.get("state") == "present": + if module.params.get("uuid"): + update_window(module, result) + else: + create_window(module, result) + else: + delete_window(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_maintenance_windows_info.py b/plugins/modules/ntnx_ndb_maintenance_windows_info.py new file mode 100644 index 000000000..b2d0c6b61 --- /dev/null +++ 
b/plugins/modules/ntnx_ndb_maintenance_windows_info.py @@ -0,0 +1,284 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_ndb_maintenance_windows_info +short_description: module for fetching maintenance windows info +version_added: 1.8.0 +description: + - module for fetching maintenance windows info + - it will fetch all entities if no spec is given + - it will also load entities and task associations +options: + uuid: + description: + - uuid of maintenance window + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_info_base_module +author: + - Prem Karat (@premkarat) + - Pradeepsingh Bhati (@bhati-pradeep) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: get certain window info + ntnx_ndb_maintenance_windows_info: + uuid: "{{window2_uuid}}" + + register: result + +- name: get all windows info + ntnx_ndb_maintenance_windows_info: + register: result +""" +RETURN = r""" +response: + description: maintenance window response with associated tasks when uuid is used. + returned: always + type: dict + sample: { + "accessLevel": null, + "dateCreated": "2023-02-25 06:34:44", + "dateModified": "2023-02-28 00:00:00", + "description": "anisble-created-window", + "entityTaskAssoc": [ + { + "accessLevel": null, + "dateCreated": "2023-02-28 10:39:23", + "dateModified": "2023-02-28 10:39:23", + "description": null, + "entity": { + "accessLevel": null, + "clones": [ + { + "id": "4b86551d-168f-405b-a888-89ac9082bdff", + "status": "READY" + } + ], + "clusterDescription": null, + "clusterEraCreated": false, + "clusterOwner": null, + "clusterStatus": null, + "databaseType": "postgres_database", + "databases": [], + "dateCreated": null, + "dateModified": null, + "dbClusterId": null, + "dbClusterName": null, + "description": "vm_desc", + "eraCreated": true, + "id": "e748bcb4-a2bb-4b6b-bb9e-1cbfe7ff0e30", + "inUse": 1, + "ipAddresses": [ + "10.44.78.8" + ], + "maintenanceWindowId": "69916cc2-eb2f-4198-984a-e8a4e507d680", + "maintenanceWindowName": "OACrBshrexJV1", + "name": "postgress_server_new1", + "nxClusterId": "0a3b964f-8616-40b9-a564-99cf35f4b8d8", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "properties": [ + { + "name": "compute_profile_id", + "value": "19b1241e-d4e0-411e-abfc-6639ba713d13" + }, + { + "name": "software_profile_id", + "value": "96b3c1a2-4427-41c1-87eb-a942c52247a2" + }, + { + "name": "software_profile_version_id", + "value": "ab966132-7d7d-4418-b89d-dc814c2ef1b3" + }, + { + "name": "network_profile_id", + "value": "6cf4fe44-5030-41a5-a0cd-4e62a55cd85a" + }, + { + "name": "associated_time_machine_id", + "value": "2ec7d4a9-c6e6-4f51-a4bd-1af7f8ee8ca8" + } + ], + "status": "UP", + "tags": [] + }, + "entityId": "e748bcb4-a2bb-4b6b-bb9e-1cbfe7ff0e30", + "entityType": "ERA_DBSERVER", + "id": "889aaa2f-bc84-4202-86ec-5c7bc54a260f", + "maintenanceWindowId": "69916cc2-eb2f-4198-984a-e8a4e507d680", + "maintenanceWindowOwnerId": null, + "name": null, + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "payload": { + "prePostCommand": { + "postCommand": "os_post", + "preCommand": "os_pre" + } + }, + "properties": null, + "status": "ACTIVE", + "tags": null, + "taskType": "OS_PATCHING" + }, + { + "accessLevel": null, + "dateCreated": "2023-02-28 10:39:23", + "dateModified": "2023-02-28 10:39:23", + 
"description": null, + "entity": { + "accessLevel": null, + "clones": [ + { + "id": "4b86551d-168f-405b-a888-89ac9082bdff", + "status": "READY" + } + ], + "clusterDescription": null, + "clusterEraCreated": false, + "clusterOwner": null, + "clusterStatus": null, + "databaseType": "postgres_database", + "databases": [], + "dateCreated": null, + "dateModified": null, + "dbClusterId": null, + "dbClusterName": null, + "description": "vm_desc", + "eraCreated": true, + "id": "e748bcb4-a2bb-4b6b-bb9e-1cbfe7ff0e30", + "inUse": 1, + "ipAddresses": [ + "10.44.78.8" + ], + "maintenanceWindowId": "69916cc2-eb2f-4198-984a-e8a4e507d680", + "maintenanceWindowName": "OACrBshrexJV1", + "name": "postgress_server_new1", + "nxClusterId": "0a3b964f-8616-40b9-a564-99cf35f4b8d8", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "properties": [ + { + "name": "compute_profile_id", + "value": "19b1241e-d4e0-411e-abfc-6639ba713d13" + }, + { + "name": "software_profile_id", + "value": "96b3c1a2-4427-41c1-87eb-a942c52247a2" + }, + { + "name": "software_profile_version_id", + "value": "ab966132-7d7d-4418-b89d-dc814c2ef1b3" + }, + { + "name": "network_profile_id", + "value": "6cf4fe44-5030-41a5-a0cd-4e62a55cd85a" + }, + { + "name": "associated_time_machine_id", + "value": "2ec7d4a9-c6e6-4f51-a4bd-1af7f8ee8ca8" + } + ], + "status": "UP", + "tags": [] + }, + "entityId": "e748bcb4-a2bb-4b6b-bb9e-1cbfe7ff0e30", + "entityType": "ERA_DBSERVER", + "id": "3b842672-61dd-4635-857c-606e161fed1d", + "maintenanceWindowId": "69916cc2-eb2f-4198-984a-e8a4e507d680", + "maintenanceWindowOwnerId": null, + "name": null, + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "payload": { + "prePostCommand": { + "postCommand": "db_post", + "preCommand": "db_pre" + } + }, + "properties": null, + "status": "ACTIVE", + "tags": null, + "taskType": "DB_PATCHING" + } + ], + "id": "69916cc2-eb2f-4198-984a-e8a4e507d680", + "name": "OACrBshrexJV1", + "nextRunTime": "2023-02-28 11:00:00", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "properties": null, + "schedule": { + "dayOfWeek": "TUESDAY", + "duration": 2, + "hour": 11, + "minute": 0, + "recurrence": "WEEKLY", + "startTime": "11:00:00", + "threshold": null, + "timeZone": "UTC", + "weekOfMonth": null + }, + "status": "SCHEDULED", + "tags": null, + "timezone": null + } +uuid: + description: maintenance window uuid when queried using uuid + returned: always + type: str + sample: "be524e70-60ad-4a8c-a0ee-8d72f954d7e6" +""" + +from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from ..module_utils.ndb.maintenance_window import MaintenanceWindow # noqa: E402 + + +def get_module_spec(): + + module_args = dict( + uuid=dict(type="str"), + ) + + return module_args + + +def get_maintenance_window(module, result): + mw = MaintenanceWindow(module) + query = {"load-task-associations": True, "load-entities": True} + resp = mw.read(uuid=module.params.get("uuid"), query=query) + result["response"] = resp + result["uuid"] = module.params.get("uuid") + + +def get_maintenance_windows(module, result): + mw = MaintenanceWindow(module) + query = {"load-task-associations": True, "load-entities": True} + resp = mw.read(query=query) + result["response"] = resp + + +def run_module(): + module = NdbBaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + ) + result = {"changed": False, "error": None, "response": None} + if module.params.get("uuid"): + get_maintenance_window(module, result) + else: + get_maintenance_windows(module, result) + 
module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_profiles.py b/plugins/modules/ntnx_ndb_profiles.py new file mode 100644 index 000000000..3c342679b --- /dev/null +++ b/plugins/modules/ntnx_ndb_profiles.py @@ -0,0 +1,1219 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_ndb_profiles +short_description: module for create, update and delete of profiles +version_added: 1.8.0 +description: + - module for create, update and delete of profiles + - currently, compute, network, database parameters and software profiles are supported + - only software profile supports version operations + - version related operations can be configured under "software" + - only software profile supports multi cluster availability +options: + profile_uuid: + description: + - uuid of profile for delete or update + type: str + name: + description: + - name of profile + type: str + desc: + description: + - profile description + type: str + type: + description: + - type of profile + - required for creation + type: str + choices: ["software", "compute", "network", "database_parameter"] + database_type: + description: + - database engine type + - required for database params, network and software profile + type: str + choices: ["postgres"] + compute: + description: + - for creating compute profile + - idempotency not supported + type: dict + suboptions: + publish: + description: + - set to publish the profile + - only valid during update + type: bool + vcpus: + description: + - vcpus + type: int + cores_per_cpu: + description: + - cores per vcpu + type: int + memory: + description: + - memory + type: int + clusters: + description: + - list of clusters where profiles should be available + - only applicable for software profile + type: list + elements: dict + suboptions: + name: + description: + - name of cluster + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of cluster + - mutually exclusive with C(name) + type: str + software: + description: + - software profile configuration + - during create, it will create base version + - idempotency not supported + type: dict + suboptions: + publish: + description: + - set to publish the profile + - only valid during update + type: bool + deprecate: + description: + - set to deprecate the profile + - only valid during update + type: bool + topology: + description: + - topology of profile + type: str + choices: ["single", "cluster"] + state: + description: + - when C(state)=present, it will create or update a version + - when C(state)=absent, it will delete version as per version_uuid + type: str + choices: ["present", "absent"] + default: "present" + version_uuid: + description: + - version uuid for version update or delete + type: str + name: + description: + - name of version + type: str + desc: + description: + - description of version + type: str + notes: + description: + - notes + - update not supported + type: dict + suboptions: + os: + description: + - operating system notes in profile + type: str + db_software: + description: + - database software notes in profile + type: str + db_server_vm: + description: + - source database server vm for creating software profile + type: dict + suboptions: + name: + 
description: + - name of database server vm + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of database server vm + - mutually exclusive with C(name) + type: str + network: + description: + - network profile configuration + - idempotency not supported + type: dict + suboptions: + publish: + description: + - set to publish the profile + - only valid during update + type: bool + topology: + description: + - topology of profile + type: str + choices: ["single", "cluster"] + vlans: + description: + - list of vlan configurations to be added in network profile + type: list + elements: dict + suboptions: + cluster: + description: + - cluster of vlan + type: dict + required: true + suboptions: + name: + description: + - name of cluster + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of cluster + - mutually exclusive with C(name) + type: str + vlan_name: + description: + - name of vlan to be added + type: str + required: true + enable_ip_address_selection: + description: + - set to enable ip address selection + type: bool + database_parameter: + description: + - database parameter profile configuration + - idempotency not supported + type: dict + suboptions: + publish: + description: + - set to publish the profile + - only valid during update + type: bool + postgres: + description: + - database params for postgres + type: dict + suboptions: + max_connections: + description: + - max number of connections + - default is 100 + type: int + max_replication_slots: + description: + - maximum replication slots + - default is 10 + type: int + max_locks_per_transaction: + description: + - max locks per transaction + - default is 64 + type: int + effective_io_concurrency: + description: + - effective I/O concurrency + - default is 1 + type: int + timezone: + description: + - timezone + - default is 'UTC' + type: str + max_prepared_transactions: + description: + - maximum prepared transactions + - default is 0 + type: int + max_wal_senders: + description: + - max wal senders + - default is 10 + type: int + min_wal_size: + description: + - min wal logs size in MB + - default is 80 + type: int + max_wal_size: + description: + - max wal logs size in GB + - default is 1 + type: int + wal_keep_segments: + description: + - wal logs keep segments + - default is 700 + type: int + max_worker_processes: + description: + - max number of worker processes + - default is 8 + type: int + checkpoint_timeout: + description: + - checkpoint timeout in minutes + - default is 5 + type: int + autovacuum: + description: + - on/off autovacuum + - default is on + type: str + choices: ["on", "off"] + checkpoint_completion_target: + description: + - checkpoint completion target + - default is 0.5 + type: float + autovacuum_freeze_max_age: + description: + - autovacuum freeze max age + - default is 200000000 + type: int + autovacuum_vacuum_threshold: + description: + - autovacuum vacuum threshold + - default is 50 + type: int + autovacuum_vacuum_scale_factor: + description: + - autovacuum scale factor + - default is 0.2 + type: float + autovacuum_work_mem: + description: + - autovacuum work memory in KB + - default is -1 + type: int + autovacuum_max_workers: + description: + - autovacuum max workers + - default is 3 + type: int + autovacuum_vacuum_cost_delay: + description: + - autovacuum cost delay in milliseconds + - default is 2 + type: int + wal_buffers: + description: + - wal buffers + - default is -1 + type: int + synchronous_commit: + description: + - synchronous commit flag 
+ type: str + choices: ["on", "off", "local", "remote_apply", "remote_write"] + random_page_cost: + description: + - random page cost + - default is 4 + type: int + +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_operations +author: + - Prem Karat (@premkarat) + - Pradeepsingh Bhati (@bhati-pradeep) + - Alaa Bishtawi (@alaa-bish) +""" +EXAMPLES = r""" +- name: creation of db params profile + ntnx_ndb_profiles: + name: "{{profile1_name}}" + desc: "testdesc" + type: database_parameter + database_type: postgres + database_parameter: + postgres: + max_connections: "{{max_connections}}" + max_replication_slots: "{{max_replication_slots}}" + max_locks_per_transaction: "{{max_locks_per_transaction}}" + effective_io_concurrency: "{{effective_io_concurrency}}" + timezone: "{{timezone}}" + max_prepared_transactions: "{{max_prepared_transactions}}" + max_wal_senders: "{{max_wal_senders}}" + min_wal_size: "{{min_wal_size}}" + max_wal_size: "{{max_wal_size}}" + wal_keep_segments: "{{wal_keep_segments}}" + max_worker_processes: "{{max_worker_processes}}" + checkpoint_timeout: "{{checkpoint_timeout}}" + autovacuum: "{{autovacuum}}" + checkpoint_completion_target: "{{checkpoint_completion_target}}" + autovacuum_freeze_max_age: "{{autovacuum_freeze_max_age}}" + autovacuum_vacuum_threshold: "{{autovacuum_vacuum_threshold}}" + autovacuum_vacuum_scale_factor: "{{autovacuum_vacuum_scale_factor}}" + autovacuum_work_mem: "{{autovacuum_work_mem}}" + autovacuum_max_workers: "{{autovacuum_max_workers}}" + autovacuum_vacuum_cost_delay: "{{autovacuum_vacuum_cost_delay}}" + wal_buffers: "{{wal_buffers}}" + synchronous_commit: "{{synchronous_commit}}" + random_page_cost: "{{random_page_cost}}" + register: result + +- name: create of single cluster network profile + ntnx_ndb_profiles: + name: "{{profile1_name}}" + desc: "testdesc" + type: network + database_type: postgres + network: + topology: single + vlans: + - + cluster: + name: "{{network_profile.single.cluster.name}}" + vlan_name: "{{network_profile.single.vlan_name}}" + enable_ip_address_selection: true + register: result + +- name: create of multiple cluster network profile + ntnx_ndb_profiles: + name: "{{profile3_name}}" + desc: "testdesc" + type: network + database_type: postgres + network: + topology: cluster + vlans: + - + cluster: + name: "{{network_profile.HA.cluster1.name}}" + vlan_name: "{{network_profile.HA.cluster1.vlan_name}}" + - + cluster: + name: "{{network_profile.HA.cluster2.name}}" + vlan_name: "{{network_profile.HA.cluster2.vlan_name}}" + +- name: creation of compute profile + ntnx_ndb_profiles: + name: "{{profile1_name}}" + desc: "testdesc" + type: compute + compute: + vcpus: 2 + cores_per_cpu: 4 + memory: 8 + register: result + +- name: create software profile with base version and cluster instance topology. 
Replicated to multiple clusters + ntnx_ndb_profiles: + name: "{{profile1_name}}-replicated" + desc: "{{profile1_name}}-desc-replicated" + type: "software" + database_type: "postgres" + software: + topology: "cluster" + name: "v1.0" + desc: "v1.0-desc" + notes: + os: "os_notes" + db_software: "db_notes" + db_server_vm: + uuid: "{{db_server_vm.uuid}}" + clusters: + - name: "{{cluster.cluster1.name}}" + - uuid: "{{cluster.cluster2.uuid}}" + register: result + +- name: create software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + database_type: "postgres" + software: + name: "v2.0" + desc: "v2.0-desc" + notes: + os: "os_notes for v2" + db_software: "db_notes for v2" + db_server_vm: + uuid: "{{db_server_vm.uuid}}" + + register: result + + +- name: update software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + database_type: "postgres" + software: + version_uuid: "{{result.version_uuid}}" + name: "v2.0-updated" + desc: "v2.0-desc-updated" + + register: result + +- name: publish software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + software: + version_uuid: "{{version_uuid}}" + publish: True + register: result + +""" + +RETURN = r""" +response: + description: response when profile is created + returned: always + type: dict + sample: { + "dateCreated": "2023-02-28 10:54:56", + "dateModified": "2023-02-28 10:54:56", + "dbVersion": "ALL", + "description": "new_name", + "engineType": "postgres_database", + "id": "76c1d0eb-28b8-4f37-aa8a-e17dfa1c87a6", + "latestVersion": "1.0", + "latestVersionId": "801c60da-9273-45bc-ab00-81fd452a5fa2", + "name": "new_name1", + "nxClusterId": "0a3b964f-8616-40b9-a564-99cf35f4b8d8", + "owner": "eac70dbf-22fb-462b-9498-949796ca1f73", + "status": "READY", + "systemProfile": false, + "topology": "cluster", + "type": "Network", + "versions": [ + { + "dateCreated": "2023-02-28 10:54:56", + "dateModified": "2023-02-28 10:54:56", + "dbVersion": "ALL", + "deprecated": false, + "description": "new_name", + "engineType": "postgres_database", + "id": "801c60da-9273-45bc-ab00-81fd452a5fa2", + "name": "new_name1 (1.0)", + "owner": "eac70dbf-22fb-462b-9498-949796ca1f73", + "profileId": "76c1d0eb-28b8-4f37-aa8a-e17dfa1c87a6", + "properties": [ + { + "name": "CLUSTER_NAME_0", + "secure": false, + "value": "c1" + }, + { + "name": "CLUSTER_ID_0", + "secure": false, + "value": "0a3b964f-8616-40b9-a564-99cf35f4b8d8" + }, + { + "name": "CLUSTER_NAME_1", + "secure": false, + "value": "c2" + }, + { + "name": "CLUSTER_ID_1", + "secure": false, + "value": "94c3e490-69e2-4144-83ff-68867e47889d" + }, + { + "name": "VLAN_NAME_1", + "secure": false, + "value": "vlan.xxxxx" + }, + { + "name": "VLAN_NAME_0", + "secure": false, + "value": "vlan.xxxxx" + }, + { + "name": "NUM_CLUSTERS", + "secure": false, + "value": "2" + }, + { + "name": "ENABLE_IP_ADDRESS_SELECTION", + "secure": false, + "value": "false" + }, + { + "name": "VLAN_ID_1", + "secure": false, + "value": "91b11a7e-563f-480c-85bd-9cfe63a28e9d" + }, + { + "name": "VLAN_TYPE_1", + "secure": false, + "value": "DHCP" + } + ], + "propertiesMap": { + "CLUSTER_ID_0": "0a3b964f-8616-40b9-a564-99cf35f4b8d8", + "CLUSTER_ID_1": "94c3e490-69e2-4144-83ff-68867e47889d", + "CLUSTER_NAME_0": "c1", + "CLUSTER_NAME_1": "c2", + "ENABLE_IP_ADDRESS_SELECTION": "false", + "NUM_CLUSTERS": "2", + "VLAN_ID_1": "91b11a7e-563f-480c-85bd-9cfe63a28e9d", + "VLAN_NAME_0": "vlan.xxxxx", + "VLAN_NAME_1": "vlan.xxxxx", + "VLAN_TYPE_1": "DHCP" + }, + "published": false, + "status": "READY", 
+ "systemProfile": false, + "topology": "cluster", + "type": "Network", + "version": "1.0" + } + ] + } +profile: + description: part of response when profile is updated + returned: always + type: dict + sample: { + "dateCreated": "2023-02-28 10:54:56", + "dateModified": "2023-02-28 10:54:56", + "dbVersion": "ALL", + "description": "new_name", + "engineType": "postgres_database", + "id": "76c1d0eb-28b8-4f37-aa8a-e17dfa1c87a6", + "latestVersion": "1.0", + "latestVersionId": "801c60da-9273-45bc-ab00-81fd452a5fa2", + "name": "new_name1", + "nxClusterId": "0a3b964f-8616-40b9-a564-99cf35f4b8d8", + "owner": "eac70dbf-22fb-462b-9498-949796ca1f73", + "status": "READY", + "systemProfile": false, + "topology": "cluster", + "type": "Network", + "versions": [ + { + "dateCreated": "2023-02-28 10:54:56", + "dateModified": "2023-02-28 10:54:56", + "dbVersion": "ALL", + "deprecated": false, + "description": "new_name", + "engineType": "postgres_database", + "id": "801c60da-9273-45bc-ab00-81fd452a5fa2", + "name": "new_name1 (1.0)", + "owner": "eac70dbf-22fb-462b-9498-949796ca1f73", + "profileId": "76c1d0eb-28b8-4f37-aa8a-e17dfa1c87a6", + "properties": [ + { + "name": "CLUSTER_NAME_0", + "secure": false, + "value": "c1" + }, + { + "name": "CLUSTER_ID_0", + "secure": false, + "value": "0a3b964f-8616-40b9-a564-99cf35f4b8d8" + }, + { + "name": "CLUSTER_NAME_1", + "secure": false, + "value": "c2" + }, + { + "name": "CLUSTER_ID_1", + "secure": false, + "value": "94c3e490-69e2-4144-83ff-68867e47889d" + }, + { + "name": "VLAN_NAME_1", + "secure": false, + "value": "vlan.xxxxx" + }, + { + "name": "VLAN_NAME_0", + "secure": false, + "value": "vlan.xxxxx" + }, + { + "name": "NUM_CLUSTERS", + "secure": false, + "value": "2" + }, + { + "name": "ENABLE_IP_ADDRESS_SELECTION", + "secure": false, + "value": "false" + }, + { + "name": "VLAN_ID_1", + "secure": false, + "value": "91b11a7e-563f-480c-85bd-9cfe63a28e9d" + }, + { + "name": "VLAN_TYPE_1", + "secure": false, + "value": "DHCP" + } + ], + "propertiesMap": { + "CLUSTER_ID_0": "0a3b964f-8616-40b9-a564-99cf35f4b8d8", + "CLUSTER_ID_1": "94c3e490-69e2-4144-83ff-68867e47889d", + "CLUSTER_NAME_0": "c1", + "CLUSTER_NAME_1": "c2", + "ENABLE_IP_ADDRESS_SELECTION": "false", + "NUM_CLUSTERS": "2", + "VLAN_ID_1": "91b11a7e-563f-480c-85bd-9cfe63a28e9d", + "VLAN_NAME_0": "vlan.xxxxx", + "VLAN_NAME_1": "vlan.xxxxx", + "VLAN_TYPE_1": "DHCP" + }, + "published": false, + "status": "READY", + "systemProfile": false, + "topology": "cluster", + "type": "Network", + "version": "1.0" + } + ] + } +version: + description: part of response denoting status of any version operation during update or delete + returned: always + type: dict + sample: { + "dateCreated": "2023-02-28 10:54:56", + "dateModified": "2023-02-28 10:54:56", + "dbVersion": "ALL", + "deprecated": false, + "description": "new_name", + "engineType": "postgres_database", + "id": "801c60da-9273-45bc-ab00-81fd452a5fa2", + "name": "new_name1 (1.0)", + "owner": "eac70dbf-22fb-462b-9498-949796ca1f73", + "profileId": "76c1d0eb-28b8-4f37-aa8a-e17dfa1c87a6", + "properties": [ + { + "name": "CLUSTER_NAME_0", + "secure": false, + "value": "c1" + }, + { + "name": "CLUSTER_ID_0", + "secure": false, + "value": "0a3b964f-8616-40b9-a564-99cf35f4b8d8" + }, + { + "name": "CLUSTER_NAME_1", + "secure": false, + "value": "c2" + }, + { + "name": "CLUSTER_ID_1", + "secure": false, + "value": "94c3e490-69e2-4144-83ff-68867e47889d" + }, + { + "name": "VLAN_NAME_1", + "secure": false, + "value": "vlan.xxxxx" + }, + { + "name": "VLAN_NAME_0", + "secure": 
false, + "value": "vlan.xxxxx" + }, + { + "name": "NUM_CLUSTERS", + "secure": false, + "value": "2" + }, + { + "name": "ENABLE_IP_ADDRESS_SELECTION", + "secure": false, + "value": "false" + }, + { + "name": "VLAN_ID_1", + "secure": false, + "value": "91b11a7e-563f-480c-85bd-9cfe63a28e9d" + }, + { + "name": "VLAN_TYPE_1", + "secure": false, + "value": "DHCP" + } + ], + "propertiesMap": { + "CLUSTER_ID_0": "0a3b964f-8616-40b9-a564-99cf35f4b8d8", + "CLUSTER_ID_1": "94c3e490-69e2-4144-83ff-68867e47889d", + "CLUSTER_NAME_0": "c1", + "CLUSTER_NAME_1": "c2", + "ENABLE_IP_ADDRESS_SELECTION": "false", + "NUM_CLUSTERS": "2", + "VLAN_ID_1": "91b11a7e-563f-480c-85bd-9cfe63a28e9d", + "VLAN_NAME_0": "vlan.xxxxx", + "VLAN_NAME_1": "vlan.xxxxx", + "VLAN_TYPE_1": "DHCP" + }, + "published": false, + "status": "READY", + "systemProfile": false, + "topology": "cluster", + "type": "Network", + "version": "1.0" + } +profile_uuid: + description: profile uuid + returned: always + type: str + sample: "be524e70-60ad-4a8c-a0ee-8d72f954d7e6" +version_uuid: + description: uuid of profile version when any operation is done on version of profile + returned: always + type: str + sample: "be524e70-60ad-4a8c-a0ee-8d72f954d7e6" +""" + +import time # noqa: E402 + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.operations import Operation # noqa: E402 +from ..module_utils.ndb.profiles.profile_types import get_profile_type_obj # noqa: E402 +from ..module_utils.ndb.profiles.profiles import Profile # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + +profile_types_with_version_support = ["software"] +profile_types_with_wait_support = ["software"] + + +# Notes: +# 1. publish/deprecate/unpublish can only be done using update. +# 2. keep version spec as part of profile related spec, +# as module avoids version operations if profile related spec is not found. 
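+# 3. For illustration only (values here are hypothetical), a version operation rides
+#    inside the profile type's own spec, e.g.
+#      software: {version_uuid: "<uuid>", name: "v2.0", publish: true}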
+def get_module_spec(): + mutually_exclusive = [("name", "uuid")] + entity_by_spec = dict(name=dict(type="str"), uuid=dict(type="str")) + + postgres_params = dict( + max_connections=dict(type="int"), + max_replication_slots=dict(type="int"), + max_locks_per_transaction=dict(type="int"), + effective_io_concurrency=dict(type="int"), + timezone=dict(type="str"), + max_prepared_transactions=dict(type="int"), + max_wal_senders=dict(type="int"), + min_wal_size=dict(type="int"), + max_wal_size=dict(type="int"), + wal_keep_segments=dict(type="int"), + max_worker_processes=dict(type="int"), + checkpoint_timeout=dict(type="int"), + autovacuum=dict(type="str", choices=["on", "off"]), + checkpoint_completion_target=dict(type="float"), + autovacuum_freeze_max_age=dict(type="int"), + autovacuum_vacuum_threshold=dict(type="int"), + autovacuum_vacuum_scale_factor=dict(type="float"), + autovacuum_work_mem=dict(type="int"), + autovacuum_max_workers=dict(type="int"), + autovacuum_vacuum_cost_delay=dict(type="int"), + wal_buffers=dict(type="int"), + synchronous_commit=dict( + type="str", choices=["on", "off", "local", "remote_apply", "remote_write"] + ), + random_page_cost=dict(type="int"), + ) + + vlan = dict( + cluster=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=True, + ), + vlan_name=dict(type="str", required=True), + ) + + notes = dict(os=dict(type="str"), db_software=dict(type="str")) + + compute = dict( + vcpus=dict(type="int"), + cores_per_cpu=dict(type="int"), + memory=dict(type="int"), + publish=dict(type="bool", required=False), + ) + + network = dict( + topology=dict(type="str", choices=["single", "cluster"]), + vlans=dict(type="list", elements="dict", options=vlan), + enable_ip_address_selection=dict(type="bool"), + publish=dict(type="bool", required=False), + ) + + software = dict( + topology=dict(type="str", choices=["single", "cluster"]), + state=dict(type="str", choices=["present", "absent"], default="present"), + version_uuid=dict(type="str"), + name=dict(type="str"), + desc=dict(type="str"), + notes=dict(type="dict", options=notes), + db_server_vm=dict( + type="dict", options=entity_by_spec, mutually_exclusive=mutually_exclusive + ), + publish=dict(type="bool", required=False), + deprecate=dict(type="bool", required=False), + ) + + database_parameter = dict( + postgres=dict(type="dict", options=postgres_params), + publish=dict(type="bool", required=False), + ) + + module_args = dict( + profile_uuid=dict(type="str", required=False), + name=dict(type="str", required=False), + desc=dict(type="str", required=False), + type=dict( + type="str", + choices=["software", "compute", "network", "database_parameter"], + required=False, + ), + database_type=dict(type="str", choices=["postgres"]), + compute=dict(type="dict", options=compute, required=False), + software=dict(type="dict", options=software, required=False), + network=dict(type="dict", options=network, required=False), + database_parameter=dict( + type="dict", options=database_parameter, required=False + ), + clusters=dict( + type="list", + elements="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=False, + ), + ) + return module_args + + +def check_profile_idempotency(old_spec, new_spec): + """ + This routine is used to check idempotency of a profile + """ + + if old_spec.get("name") != new_spec.get("name"): + return False + if old_spec.get("description") != new_spec.get("description"): + return False + + # check cluster availability update for software 
profile + if new_spec.get("updateClusterAvailability"): + old_clusters = [] + for cluster in old_spec.get("clusterAvailability", []): + if cluster["status"] == "ACTIVE": + old_clusters.append(cluster["nxClusterId"]) + + new_clusters = new_spec.get("availableClusterIds", []) + + if len(new_clusters) != len(old_clusters): + return False + + # update is required if cluster availability differs + for cluster in new_clusters: + if cluster not in old_clusters: + return False + + return True + + +def create_profile_version(module, result, profile_uuid, profile_obj): + spec, err = profile_obj.get_spec(create=True, version=True) + if err: + result["error"] = err + module.fail_json(msg="Failed getting spec for new maintenance window", **result) if False else module.fail_json(msg="Failed generating profile version create spec", **result) + + if module.check_mode: + result["response"]["version"] = spec + return + + resp = profile_obj.create_version(profile_uuid, data=spec) + version_uuid = resp.get("entityId") + result["version_uuid"] = version_uuid + result["response"]["version"] = resp + + profile_type = profile_obj.get_type().lower() + if ( + module.params.get("wait") + and profile_type in profile_types_with_wait_support + and resp.get("operationId") + ): + + ops_uuid = resp["operationId"] + operations = Operation(module) + time.sleep(3)  # brief wait so the operation ID becomes pollable + operations.wait_for_completion(ops_uuid, delay=10) + + result["response"]["version"] = profile_obj.get_profile_by_version( + uuid=profile_uuid, version_uuid=version_uuid + ) + + result["changed"] = True + + +def update_profile_version(module, result, profile_uuid, profile_obj): + profile_type = profile_obj.get_type().lower() + config = module.params.get(profile_type) + + version_uuid = "latest" + if config and config.get("version_uuid"): + version_uuid = config.get("version_uuid") + + version = profile_obj.get_profile_by_version( + uuid=profile_uuid, version_uuid=version_uuid + ) + version_uuid = version.get("entityId") or version.get("id") + result["version_uuid"] = version_uuid + + engine_type = version.get("engineType") + + default_spec = profile_obj.get_default_version_update_spec(override_spec=version) + + kwargs = {"version": True, "update": True, "engine_type": engine_type} + spec, err = profile_obj.get_spec(old_spec=default_spec, **kwargs) + if err: + result["error"] = err + module.fail_json(msg="Failed generating profile version update spec", **result) + + if spec.get("propertiesMap"): + spec.pop("propertiesMap") + + if module.check_mode: + result["response"]["version"] = spec + return + + resp = profile_obj.update_version(profile_uuid, version_uuid, spec) + result["response"]["version"] = resp + result["changed"] = True + + if ( + module.params.get("wait") + and profile_type in profile_types_with_wait_support + and resp.get("operationId") + ): + + ops_uuid = resp["operationId"] + operations = Operation(module) + time.sleep(3)  # brief wait so the operation ID becomes pollable + operations.wait_for_completion(ops_uuid, delay=10) + + result["response"]["version"] = profile_obj.get_profile_by_version( + uuid=profile_uuid, version_uuid=version_uuid + ) + + +def delete_profile_version(module, result, profile_uuid, profile_obj): + profile_type = profile_obj.get_type().lower() + + config = module.params.get(profile_type) + + version_uuid = config.get("version_uuid") + if not version_uuid: + module.fail_json(msg="version_uuid is required field for version delete", **result) + + resp = profile_obj.delete_version( + profile_uuid=profile_uuid, version_uuid=version_uuid + ) + result["response"]["version"] = resp + result["changed"] = True + + +def 
version_operations(module, result, profile_uuid, profile_obj): + profile_type = profile_obj.get_type().lower() + result["profile_type"] = profile_type + if profile_type not in profile_types_with_version_support: + update_profile_version(module, result, profile_uuid, profile_obj) + else: + profile_config = module.params.get(profile_type) + state = profile_config.get("state", "present") + if state == "present": + if profile_config.get("version_uuid"): + update_profile_version(module, result, profile_uuid, profile_obj) + else: + create_profile_version(module, result, profile_uuid, profile_obj) + else: + delete_profile_version(module, result, profile_uuid, profile_obj) + + +def create_profile(module, result): + profile_type = module.params.get("type") + if not profile_type: + return module.fail_json( + msg="'type' is required field for creating a profile" + ) + + _profile, err = get_profile_type_obj(module, profile_type=profile_type) + if err: + result["error"] = err + err_msg = "Failed getting object for profile type {0}".format(profile_type) + module.fail_json(msg=err_msg, **result) + + spec, err = _profile.get_spec(create=True) + if err: + result["error"] = err + module.fail_json(msg="Failed generating create profile spec", **result) + + if module.check_mode: + result["response"] = spec + return + + resp = _profile.create(data=spec) + result["response"] = resp + uuid = resp.get("id") + + # in case a replication process is triggered, operation info is received + if profile_type == "software" and not uuid: + uuid = resp.get("entityId") + + if not uuid: + return module.fail_json( + msg="Failed fetching uuid post profile creation", **result + ) + + result["profile_uuid"] = uuid + + # polling is only required for software profile + if ( + module.params.get("wait") + and profile_type in profile_types_with_wait_support + and resp.get("operationId") + ): + + ops_uuid = resp["operationId"] + operations = Operation(module) + time.sleep(3)  # brief wait so the operation ID becomes pollable + operations.wait_for_completion(ops_uuid, delay=10) + + resp = _profile.get_profiles(uuid=uuid) + result["response"] = resp + + result["changed"] = True + + +def update_profile(module, result): + uuid = module.params.get("profile_uuid") + if not uuid: + module.fail_json(msg="profile_uuid is required field for update", **result) + + result["profile_uuid"] = uuid + + _profile = Profile(module) + + profile = _profile.get_profiles(uuid=uuid) + + profile_type = module.params.get("type") or profile.get("type", "").lower() + if not profile_type: + result["response"] = profile + return module.fail_json(msg="Failed getting profile type", **result) + + _profile, err = get_profile_type_obj(module, profile_type=profile_type) + if err: + result["error"] = err + err_msg = "Failed generating object for profile type {0}".format(profile_type) + module.fail_json(msg=err_msg, **result) + + # profile update operations + default_update_spec = _profile.get_default_update_spec(override_spec=profile) + + profile_update_spec, err = _profile.get_spec( + old_spec=default_update_spec, update=True + ) + if err: + result["error"] = err + module.fail_json(msg="Failed creating profile update spec", **result) + + result["response"] = {} + + if module.check_mode: + result["response"]["profile"] = profile_update_spec + + if not module.check_mode and not check_profile_idempotency( + profile, profile_update_spec + ): + resp = _profile.update(data=profile_update_spec, uuid=uuid) + result["response"]["profile"] = resp + result["changed"] = True + if ( + 
module.params.get("wait") + and profile_type in profile_types_with_wait_support + and resp.get("operationId") + ): + + ops_uuid = resp["operationId"] + operations = Operation(module) + time.sleep(3) # to get operation ID functional + operations.wait_for_completion(ops_uuid, delay=10) + + resp = _profile.get_profiles(uuid=uuid) + result["response"]["profile"] = resp + + # perform versions related crud as per support + # version spec needs to be part of spec of profile type + if module.params.get(profile_type): + version_operations(module, result, profile_uuid=uuid, profile_obj=_profile) + + if not module.check_mode: + resp = _profile.get_profiles(uuid=uuid) + result["response"]["profile"] = resp + + +def delete_profile(module, result): + profiles = Profile(module) + + uuid = module.params.get("profile_uuid") + if not uuid: + return module.fail_json(msg="'profile_uuid' is a required for deleting profile") + + resp = profiles.delete(uuid) + + result["response"] = resp + result["changed"] = True + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + required_if=[ + ("state", "present", ("name", "profile_uuid"), True), + ("state", "absent", ("profile_uuid",)), + ], + supports_check_mode=True, + ) + + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None, "profile_uuid": None} + if module.params["state"] == "present": + if module.params.get("profile_uuid"): + update_profile(module, result) + else: + create_profile(module, result) + else: + delete_profile(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_profiles_info.py b/plugins/modules/ntnx_ndb_profiles_info.py index 0756d97fc..cf08eb53a 100644 --- a/plugins/modules/ntnx_ndb_profiles_info.py +++ b/plugins/modules/ntnx_ndb_profiles_info.py @@ -11,7 +11,7 @@ --- module: ntnx_ndb_profiles_info short_description: info module for ndb profiles -version_added: 1.8.0-beta.1 +version_added: 1.8.0 description: 'Get profile info' options: name: @@ -20,24 +20,34 @@ type: str uuid: description: - - profile id + - profile uuid type: str - profile_type: - description: - - profile type - type: str - choices: ["Software", "Compute", "Network", "Database_Parameter"] version_id: description: - - vrsion id + - vrsion uuid type: str latest_version: description: - - whether the lastet version of profile or no + - to fetch latest version of profile in case of software profile type: bool default: false + filters: + description: + - filters for fetching info + type: dict + suboptions: + engine: + description: + - filter as per database engine type + type: str + choices: ["oracle_database","postgres_database","sqlserver_database","mariadb_database","mysql_database","saphana_database","mongodb_database",] + type: + description: + - filter as per profile type + type: str + choices: ["Software","Compute","Network","Database_Parameter",] extends_documentation_fragment: - - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_ndb_info_base_module author: - Prem Karat (@premkarat) - Gevorg Khachatryan (@Gevorg-Khachatryan-97) @@ -168,19 +178,45 @@ """ from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 -from ..module_utils.ndb.profiles import Profile # noqa: E402 +from ..module_utils.ndb.profiles.profiles import Profile # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 def get_module_spec(): + filters_spec = dict( + 
engine=dict( + type="str", + choices=[ + "oracle_database", + "postgres_database", + "sqlserver_database", + "mariadb_database", + "mysql_database", + "saphana_database", + "mongodb_database", + ], + ), + type=dict( + type="str", + choices=[ + "Software", + "Compute", + "Network", + "Database_Parameter", + ], + ), + ) + + module_args = dict( + name=dict(type="str"), + uuid=dict(type="str"), - profile_type=dict( - type="str", choices=["Software", "Compute", "Network", "Database_Parameter"] - ), version_id=dict(type="str"), latest_version=dict(type="bool", default=False), + filters=dict( + type="dict", + options=filters_spec, + ), ) return module_args @@ -190,19 +226,16 @@ def get_profile(module, result): profile = Profile(module) name = module.params.get("name") uuid = module.params.get("uuid") - type = module.params.get("profile_type") - resp, err = profile.get_profiles(uuid, name, type) - if err: - result["error"] = err - module.fail_json(msg="Failed fetching profile info", **result) + resp = profile.get_profiles(uuid, name) result["response"] = resp def get_profiles(module, result): profile = Profile(module) + query_params = module.params.get("filters") - resp = profile.read() + resp = profile.read(query=query_params) result["response"] = resp @@ -231,14 +264,11 @@ def run_module(): required_by={"version_id": "uuid"}, required_if=[("latest_version", True, ("uuid",))], ) + remove_param_with_none_value(module.params) result = {"changed": False, "error": None, "response": None} if module.params.get("version_id") or module.params.get("latest_version"): get_profiles_version(module, result) - elif ( - module.params.get("name") - or module.params.get("uuid") - or module.params.get("profile_type") - ): + elif module.params.get("name") or module.params.get("uuid"): get_profile(module, result) else: get_profiles(module, result) diff --git a/plugins/modules/ntnx_ndb_register_database.py b/plugins/modules/ntnx_ndb_register_database.py new file mode 100644 index 000000000..503e23bfb --- /dev/null +++ b/plugins/modules/ntnx_ndb_register_database.py @@ -0,0 +1,837 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_ndb_register_database +short_description: module for database instance registration +version_added: 1.8.0 +description: + - module for database instance registration + - currently, only postgres single instance database registration is supported +options: + name: + description: + - name of database instance to be created in ndb + type: str + required: true + desc: + description: + - description of database instance + type: str + db_vm: + description: + - source database server vm details + - either a registered or an unregistered vm can be configured as source + type: dict + required: true + suboptions: + registered: + description: + - configure a registered vm as source + type: dict + suboptions: + name: + description: + - name of database server vm + - mutually exclusive with C(uuid) and C(ip) + type: str + uuid: + description: + - uuid of database server vm + - mutually exclusive with C(name) and C(ip) + type: str + ip: + description: + - ip of database server vm + - mutually exclusive with C(uuid) and C(name) + type: str + unregistered: + description: + - configure an unregistered vm as source + - registration of database will also register the 
given vm + type: dict + suboptions: + ip: + description: + - ip of vm + type: str + required: true + username: + description: + - username of vm + type: str + required: true + private_key: + description: + - private key of vm + - mutually exclusive with C(password) + type: str + password: + description: + - password of vm + - mutually exclusive with C(private_key) + type: str + desc: + description: + - set description of vm + type: str + reset_desc_in_ntnx_cluster: + description: + - reset description in cluster to C(desc) given + type: bool + default: false + cluster: + description: + - cluster where vm is present + type: dict + required: true + suboptions: + name: + description: + - name of cluster + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of cluster + - mutually exclusive with C(name) + type: str + time_machine: + description: + - configure new time machine for database instance + type: dict + required: true + suboptions: + name: + description: + - name of time machine + type: str + required: true + desc: + description: + - description of time machine + type: str + sla: + description: + - configure sla + type: dict + required: true + suboptions: + name: + description: + - name of sla + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of sla + - mutually exclusive with C(name) + type: str + schedule: + description: + - configure schedule of snapshots + type: dict + suboptions: + daily: + type: str + description: daily snapshot time in HH:MM:SS format + weekly: + type: str + description: weekly snapshot day. For Example, "WEDNESDAY" + monthly: + type: int + description: monthly snapshot day in a month + quaterly: + type: str + description: + - quarterly snapshot month + - day of month is set based on C(monthly) + - C(monthly) is required for setting C(quaterly) else it is ignored + - For Example, "JANUARY" + yearly: + type: str + description: + - yearly snapshot month + - day of month is set based on C(monthly) + - C(monthly) is required for setting C(yearly) else it is ignored + - For Example, "JANUARY" + log_catchup: + type: int + description: log catchup intervals in minutes + choices: + - 15 + - 30 + - 60 + - 90 + - 120 + snapshots_per_day: + type: int + description: num of snapshots per day + default: 1 + auto_tune_log_drive: + description: + - set flag for auto tuning of log drive + type: bool + default: true + postgres: + description: + - postgres related configuration + type: dict + suboptions: + listener_port: + description: + - listener port of database in vm + type: str + default: "5432" + db_name: + description: + - initial database that would be added + type: str + required: true + db_password: + description: + - password of C(db_user) in database instance + type: str + required: true + db_user: + description: + - username for connecting to database instance in vm + type: str + default: "postgres" + software_path: + description: + - path where desired postgres instance is located. For ex. 
"/usr/pgsql-10.4" + type: str + type: + description: + - architecture type of database + type: str + choices: ["single"] + default: "single" + tags: + description: + - dict of tag name as key and tag value as value + type: dict + auto_tune_staging_drive: + description: + - flag for auto tuning staging drive + type: bool + working_directory: + description: + - directory path to be created and used by ndb for its scripts + type: str + default: "/tmp" + automated_patching: + description: + - configure automated patching using maintenance windows + - to be only used while creation + type: dict + suboptions: + maintenance_window: + description: + - maintenance window details + type: dict + suboptions: + name: + description: + - name of maintenance window + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of maintenance window + - mutually exclusive with C(name) + type: str + tasks: + description: + - list of maintenance pre-post tasks + type: list + elements: dict + suboptions: + type: + description: + - type of patching + type: str + choices: ["OS_PATCHING", "DB_PATCHING"] + pre_task_cmd: + description: + - full os command which needs to run before patching task in db server vm + type: str + post_task_cmd: + description: + - full os command which needs to run after patching task in db server vm + type: str + +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_operations + +author: + - Prem Karat (@premkarat) + - Pradeepsingh Bhati (@bhati-pradeep) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: regsiter database from registered vm + ntnx_ndb_register_database: + wait: true + + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_vm: + registered: + name: "{{vm1_name}}" + + postgres: + db_name: testAnsible1 + db_password: "{{vm_password}}" + software_path: "{{postgres.software_home}}" + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + + tags: + ansible-databases: "single-instance-dbs" + + register: result + +- name: register database from unregistred vm + ntnx_ndb_register_database: + wait: true + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_vm: + unregistered: + ip: "{{_vm_ip}}" + username: "{{vm_username}}" + password: "{{vm_password}}" + desc: "vm-desc-updated" + reset_desc_in_ntnx_cluster: True + cluster: + name: "{{cluster.cluster1.name}}" + + postgres: + db_name: testAnsible1 + db_password: "{{vm_password}}" + software_path: "{{postgres.software_home}}" + db_user: "postgres" + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + + tags: + ansible-databases: "single-instance-dbs" + register: result +""" +RETURN = r""" +response: + description: database creation response after provisioning + returned: always + type: dict + sample: { + "category": "DB_GROUP_IMPLICIT", + "clone": false, + "clustered": false, + "databaseNodes": [ + { + "databaseId": "e9374379-de51-4cc8-8d12-b1b6eb64d129", + "databaseStatus": "READY", + "dateCreated": "2022-10-19 18:49:25", + "dateModified": "2022-10-19 18:51:33", + "dbserver": null, + "dbserverId": "0bee18d7-1f7c-4a7b-8d52-cd7f22f3121a", + "description": "postgres_database POSTGRES_DATABASE_ANSIBLE on host 10.51.144.213", + "id": 
"7228a75f-86d9-4a5b-aa1a-cc52c1fcfce3", + "info": { + "info": {}, + "secureInfo": null + }, + "metadata": null, + "name": "POSTGRES_DATABASE_ANSIBLE", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "primary": false, + "properties": [], + "protectionDomain": null, + "protectionDomainId": "d67b312c-6f3a-4322-a9f2-15ec0bdc9dc5", + "softwareInstallationId": "b48c4b34-a6a1-4040-b4df-0bd4ab9c9e2c", + "status": "READY", + "tags": [] + } + ], + "dateCreated": "2022-10-19 18:26:55", + "dateModified": "2022-10-19 18:51:26", + "dbserverLogicalClusterId": null, + "dbserverlogicalCluster": null, + "description": null, + "eraCreated": true, + "groupInfo": null, + "id": "e9374379-de51-4cc8-8d12-b1b6eb64d129", + "info": { + "info": { + "bpg_configs": { + "bpg_db_param": { + "effective_cache_size": "3GB", + "maintenance_work_mem": "512MB", + "max_parallel_workers_per_gather": "2", + "max_worker_processes": "8", + "shared_buffers": "1024MB", + "work_mem": "32MB" + }, + "storage": { + "archive_storage": { + "size": 600.0 + }, + "data_disks": { + "count": 4.0 + }, + "log_disks": { + "count": 4.0, + "size": 100.0 + } + }, + "vm_properties": { + "dirty_background_ratio": 5.0, + "dirty_expire_centisecs": 500.0, + "dirty_ratio": 15.0, + "dirty_writeback_centisecs": 100.0, + "nr_hugepages": 118.0, + "overcommit_memory": 1.0, + "swappiness": 0.0 + } + } + }, + "secureInfo": {} + }, + "linkedDatabases": [ + { + "databaseName": "prad", + "databaseStatus": "READY", + "dateCreated": "2022-10-19 18:48:37", + "dateModified": "2022-10-19 18:48:37", + "description": null, + "id": "6d4da687-a425-43f1-a9df-fa28a6b0af80", + "info": { + "info": { + "created_by": "user" + }, + "secureInfo": null + }, + "metadata": null, + "metric": null, + "name": "prad", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "parentDatabaseId": "e9374379-de51-4cc8-8d12-b1b6eb64d129", + "parentLinkedDatabaseId": null, + "snapshotId": null, + "status": "READY", + "timeZone": null + }, + { + "databaseName": "postgres", + "databaseStatus": "READY", + "dateCreated": "2022-10-19 18:48:37", + "dateModified": "2022-10-19 18:48:37", + "description": null, + "id": "67314b51-326f-4fc8-a345-668933a18cbd", + "info": { + "info": { + "created_by": "system" + }, + "secureInfo": null + }, + "metadata": null, + "metric": null, + "name": "postgres", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "parentDatabaseId": "e9374379-de51-4cc8-8d12-b1b6eb64d129", + "parentLinkedDatabaseId": null, + "snapshotId": null, + "status": "READY", + "timeZone": null + }, + { + "databaseName": "template0", + "databaseStatus": "READY", + "dateCreated": "2022-10-19 18:48:37", + "dateModified": "2022-10-19 18:48:37", + "description": null, + "id": "ba4bf273-b5ab-4743-a222-dffa178220f2", + "info": { + "info": { + "created_by": "system" + }, + "secureInfo": null + }, + "metadata": null, + "metric": null, + "name": "template0", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "parentDatabaseId": "e9374379-de51-4cc8-8d12-b1b6eb64d129", + "parentLinkedDatabaseId": null, + "snapshotId": null, + "status": "READY", + "timeZone": null + }, + { + "databaseName": "template1", + "databaseStatus": "READY", + "dateCreated": "2022-10-19 18:48:37", + "dateModified": "2022-10-19 18:48:37", + "description": null, + "id": "704d8464-d8aa-47ff-8f79-347cfae90abd", + "info": { + "info": { + "created_by": "system" + }, + "secureInfo": null + }, + "metadata": null, + "metric": null, + "name": "template1", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "parentDatabaseId": 
"e9374379-de51-4cc8-8d12-b1b6eb64d129", + "parentLinkedDatabaseId": null, + "snapshotId": null, + "status": "READY", + "timeZone": null + } + ], + "provisionOperationId": "d9b1924f-a768-4cd8-886b-7a69e61f5b89", + "metric": null, + "name": "POSTGRES_DATABASE_ANSIBLE", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "parentDatabaseId": null, + "parentSourceDatabaseId": null, + "parentTimeMachineId": null, + "placeholder": false, + "properties": [ + { + "description": null, + "name": "db_parameter_profile_id", + "ref_id": "e9374379-de51-4cc8-8d12-b1b6eb64d129", + "secure": false, + "value": "a80ac1fb-8787-4442-8f38-eeb2417a8cbb" + }, + { + "description": null, + "name": "auth", + "ref_id": "e9374379-de51-4cc8-8d12-b1b6eb64d129", + "secure": false, + "value": "md5" + }, + { + "description": null, + "name": "AUTO_EXTEND_DB_STAGE", + "ref_id": "e9374379-de51-4cc8-8d12-b1b6eb64d129", + "secure": false, + "value": "true" + }, + { + "description": null, + "name": "provisioning_spec", + "ref_id": "e9374379-de51-4cc8-8d12-b1b6eb64d129", + "secure": false, + "value": "" + }, + { + "description": null, + "name": "version", + "ref_id": "e9374379-de51-4cc8-8d12-b1b6eb64d129", + "secure": false, + "value": "10.4" + }, + { + "description": null, + "name": "vm_ip", + "ref_id": "e9374379-de51-4cc8-8d12-b1b6eb64d129", + "secure": false, + "value": "xx.xx.xx.xx" + }, + { + "description": null, + "name": "postgres_software_home", + "ref_id": "e9374379-de51-4cc8-8d12-b1b6eb64d129", + "secure": false, + "value": "%2Fusr%2Fpgsql-10.4" + }, + { + "description": null, + "name": "listener_port", + "ref_id": "e9374379-de51-4cc8-8d12-b1b6eb64d129", + "secure": false, + "value": "5432" + } + ], + "status": "READY", + "tags": [], + "timeMachine": null, + "timeMachineId": "be524e70-60ad-4a8c-a0ee-8d72f954d7e6", + "timeZone": "UTC", + "type": "postgres_database" + } +db_uuid: + description: created database UUID + returned: always + type: str + sample: "be524e70-60ad-4a8c-a0ee-8d72f954d7e6" +""" +import time # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.database_instances import DatabaseInstance # noqa: E402 +from ..module_utils.ndb.db_server_vm import DBServerVM # noqa: E402 +from ..module_utils.ndb.maintenance_window import ( # noqa: E402 + AutomatedPatchingSpec, + MaintenanceWindow, +) +from ..module_utils.ndb.operations import Operation # noqa: E402 +from ..module_utils.ndb.tags import Tag # noqa: E402 +from ..module_utils.ndb.time_machines import TimeMachine # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + + +def get_module_spec(): + mutually_exclusive = [("name", "uuid")] + entity_by_spec = dict(name=dict(type="str"), uuid=dict(type="str")) + automated_patching = deepcopy( + AutomatedPatchingSpec.automated_patching_argument_spec + ) + registered_vm = dict( + name=dict(type="str", required=False), + uuid=dict(type="str", required=False), + ip=dict(type="str", required=False), + ) + + unregistered_vm = dict( + ip=dict(type="str", required=True), + username=dict(type="str", required=True), + private_key=dict(type="str", required=False, no_log=True), + password=dict(type="str", required=False, no_log=True), + desc=dict(type="str", required=False), + reset_desc_in_ntnx_cluster=dict(type="bool", default=False, required=False), + cluster=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=True, + ), + ) + + db_vm = dict( + 
registered=dict( + type="dict", + options=registered_vm, + mutually_exclusive=[("name", "uuid", "ip")], + required=False, + ), + unregistered=dict( + type="dict", + options=unregistered_vm, + mutually_exclusive=[("password", "private_key")], + required=False, + ), + ) + + sla = dict( + uuid=dict(type="str", required=False), + name=dict(type="str", required=False), + ) + + schedule = dict( + daily=dict(type="str", required=False), + weekly=dict(type="str", required=False), + monthly=dict(type="int", required=False), + quaterly=dict(type="str", required=False), + yearly=dict(type="str", required=False), + log_catchup=dict(type="int", choices=[15, 30, 60, 90, 120], required=False), + snapshots_per_day=dict(type="int", required=False, default=1), + ) + + time_machine = dict( + name=dict(type="str", required=True), + desc=dict(type="str", required=False), + sla=dict( + type="dict", + options=sla, + mutually_exclusive=mutually_exclusive, + required=True, + ), + schedule=dict(type="dict", options=schedule, required=False), + auto_tune_log_drive=dict(type="bool", required=False, default=True), + ) + + postgres = dict( + listener_port=dict(type="str", default="5432", required=False), + db_name=dict(type="str", required=True), + db_password=dict(type="str", required=True, no_log=True), + db_user=dict(type="str", default="postgres", required=False), + software_path=dict(type="str", required=False), + type=dict(type="str", choices=["single"], default="single", required=False), + ) + + module_args = dict( + name=dict(type="str", required=True), + desc=dict(type="str", required=False), + db_vm=dict( + type="dict", + options=db_vm, + mutually_exclusive=[("registered", "unregistered")], + required=True, + ), + time_machine=dict(type="dict", options=time_machine, required=True), + postgres=dict(type="dict", options=postgres, required=False), + tags=dict(type="dict", required=False), + auto_tune_staging_drive=dict(type="bool", required=False), + working_directory=dict(type="str", default="/tmp", required=False), + automated_patching=dict( + type="dict", options=automated_patching, required=False + ), + ) + return module_args + + +def get_registration_spec(module, result): + + # create database instance obj + db_instance = DatabaseInstance(module=module) + + # get default spec + spec = db_instance.get_default_registration_spec() + + # populate VM related spec + db_vm = DBServerVM(module=module) + + use_registered_server = ( + True if module.params.get("db_vm", {}).get("registered") else False + ) + register_server = not use_registered_server + + kwargs = { + "use_registered_server": use_registered_server, + "register_server": register_server, + "db_instance_register": True, + } + spec, err = db_vm.get_spec(old_spec=spec, **kwargs) + if err: + result["error"] = err + err_msg = "Failed getting vm spec for new database instance registration" + module.fail_json(msg=err_msg, **result) + + # populate database engine related spec + spec, err = db_instance.get_db_engine_spec(spec, register=True) + if err: + result["error"] = err + err_msg = "Failed getting database engine related spec for database instance registration" + module.fail_json(msg=err_msg, **result) + + # populate database instance related spec + spec, err = db_instance.get_spec(spec, register=True) + if err: + result["error"] = err + err_msg = "Failed getting spec for database instance registration" + module.fail_json(msg=err_msg, **result) + + # populate time machine related spec + time_machine = TimeMachine(module) + spec, err = 
time_machine.get_spec(spec) + if err: + result["error"] = err + err_msg = ( + "Failed getting spec for time machine for database instance registration" + ) + module.fail_json(msg=err_msg, **result) + + # populate tags related spec + tags = Tag(module) + spec, err = tags.get_spec(spec, associate_to_entity=True, type="DATABASE") + if err: + result["error"] = err + err_msg = "Failed getting spec for tags for database instance registration" + module.fail_json(msg=err_msg, **result) + + # configure automated patching + if module.params.get("automated_patching"): + mw = MaintenanceWindow(module) + mw_spec, err = mw.get_spec(configure_automated_patching=True) + if err: + result["error"] = err + err_msg = "Failed getting spec for automated patching in database instance" + module.fail_json(msg=err_msg, **result) + spec["maintenanceTasks"] = mw_spec + + return spec + + +def register_instance(module, result): + db_instance = DatabaseInstance(module) + + spec = get_registration_spec(module, result) + + if module.check_mode: + result["response"] = spec + return + + resp = db_instance.register(data=spec) + result["response"] = resp + result["db_uuid"] = resp["entityId"] + db_uuid = resp["entityId"] + + if module.params.get("wait"): + ops_uuid = resp["operationId"] + operations = Operation(module) + time.sleep(5) # to get operation ID functional + operations.wait_for_completion(ops_uuid, delay=15) + query = {"detailed": True, "load-dbserver-cluster": True} + resp = db_instance.read(db_uuid, query=query) + result["response"] = resp + + result["changed"] = True + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None, "db_uuid": None} + register_instance(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_register_db_server_vm.py b/plugins/modules/ntnx_ndb_register_db_server_vm.py new file mode 100644 index 000000000..3c5a9c240 --- /dev/null +++ b/plugins/modules/ntnx_ndb_register_db_server_vm.py @@ -0,0 +1,471 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +DOCUMENTATION = r""" +--- +module: ntnx_ndb_register_db_server_vm +short_description: module for registration of database server vm +version_added: 1.8.0 +description: module for registration of database server vm +options: + ip: + description: + - IP of vm + type: str + required: true + desc: + description: + - set description of vm in ndb + type: str + reset_desc_in_ntnx_cluster: + description: + - reset description of vm in cluster as per C(desc) in ndb + type: bool + default: false + cluster: + description: + - cluster where db server vm is hosted + type: dict + required: true + suboptions: + name: + description: + - name of cluster + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of cluster + - mutually exclusive with C(name) + type: str + postgres: + description: + - potgres related configuration + type: dict + suboptions: + listener_port: + description: + - listener port of database + type: str + default: "5432" + software_path: + description: + - path where desired postgres instance is located. For ex. 
"/usr/pgsql-10.4" + type: str + required: true + username: + description: + - username to access vm + type: str + required: true + password: + description: + - password for accessing vm + - mutually_exclusive with C(private_ssh_key) + type: str + private_ssh_key: + description: + - private key for accessing vm + - mutually_exclusive with C(password) + type: str + working_directory: + description: + - directory path to be created and used by ndb for its scripts + type: str + default: "/tmp" + automated_patching: + description: + - configure automated patching using maintenance windows + - to be only used while creation + type: dict + suboptions: + maintenance_window: + description: + - maintenance window details + type: dict + suboptions: + name: + description: + - name of maintenance window + - mutually exclusive with C(uuid) + type: str + uuid: + description: + - uuid of maintenance window + - mutually exclusive with C(name) + type: str + tasks: + description: + - list of maintenance pre-post tasks + type: list + elements: dict + suboptions: + type: + description: + - type of patching + type: str + choices: ["OS_PATCHING", "DB_PATCHING"] + pre_task_cmd: + description: + - full os command which needs to run before patching task in db server vm + type: str + post_task_cmd: + description: + - full os command which needs to run after patching task in db server vm + type: str + +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_operations +author: + - Prem Karat (@premkarat) + - Pradeepsingh Bhati (@bhati-pradeep) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: register db server vm + ntnx_ndb_register_db_server_vm: + ip: "{{vm_ip}}" + desc: "register-vm-desc" + cluster: + name: "{{cluster.cluster1.name}}" + postgres: + listener_port: 5432 + software_path: "{{postgres.software_home}}" + username: "{{vm_username}}" + password: "{{vm_password}}" + register: result + +""" +RETURN = r""" +response: + description: database server intent response + returned: always + type: dict + sample: { + "id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "test-setup-dnd", + "description": "DBServer for test-setup-dnd", + "dateCreated": "2023-02-24 07:42:55", + "dateModified": "2023-02-28 09:44:34", + "properties": [ + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "software_profile_version_id", + "value": "ab966132-7d7d-4418-b89d-dc814c2ef1b3", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "current_op_id", + "value": "32536509-0ca0-4475-a347-016c23855bfd", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "isEraCreated", + "value": "true", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "software_home", + "value": "/usr/pgsql-10.4", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "vm_ip_address_list", + "value": "xx.xx.xx.xx", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "working_dir", + "value": "/tmp", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "os_type", + "value": "linux", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "application_type", + "value": "postgres_database", + "secure": 
false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "application_version", + "value": "10.4", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "os_info", + "value": "Linux test-setup-dnd 5.10.0-1.el7.elrepo.x86_64 #1 SMP Sun Dec 13 18:34:48 EST 2020 x86_64 x86_64 x86_64 GNU/Linux\n", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "node_type", + "value": "database", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "era_base", + "value": "/opt/era_base", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "era_user", + "value": "era", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "compute_profile_id", + "value": "19b1241e-d4e0-411e-abfc-6639ba713d13", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "network_profile_id", + "value": "6cf4fe44-5030-41a5-a0cd-4e62a55cd85a", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "software_profile_id", + "value": "96b3c1a2-4427-41c1-87eb-a942c52247a2", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "vm_cpu_count", + "value": "1", + "secure": false, + "description": null + }, + { + "ref_id": "7615993c-8455-4bc6-b562-8075a840991e", + "name": "vm_core_count", + "value": "1", + "secure": false, + "description": null + } + ], + "tags": [], + "eraCreated": true, + "dbserverClusterId": null, + "vmClusterName": "test-setup-dnd", + "vmClusterUuid": "1626600d-aa20-438e-94e8-3d3f0a5c948f", + "ipAddresses": [ + "10.44.78.125" + ], + "fqdns": null, + "macAddresses": [ + "" + ], + "type": "DBSERVER", + "status": "UP", + "clientId": "147e09d5-53fd-4da8-8a46-6c82d7ab5c6e", + "nxClusterId": "0a3b964f-8616-40b9-a564-99cf35f4b8d8", + "eraDriveId": "44dcffdf-235b-465f-b07f-ad253c26d93b", + "eraVersion": "2.5.1", + "vmTimeZone": "UTC", + "vmInfo": { + "secureInfo": null, + "info": null, + "deregisterInfo": null, + "osType": null, + "osVersion": null, + "distribution": null, + "networkInfo": [ + { + "vlanName": "vlan.sds", + "vlanUuid": "61213511-6383-4a38-9ac8-4a552c0e5865", + "vlanType": "Static", + } + ] + }, + "info": null, + "metric": null, + "clustered": false, + "requestedVersion": null, + "is_server_driven": false, + "associated_time_machine_id": null, + "time_machine_info": null, + "eraDrive": null, + "databases": null, + "clones": null, + "accessKey": null, + "softwareInstallations": null, + "protectionDomainId": "ef185e83-fc47-4111-bff5-3e5f003bb610", + "protectionDomain": null, + "queryCount": 0, + "databaseType": "postgres_database", + "dbserverInValidEaState": true, + "workingDirectory": "/tmp", + "validDiagnosticBundleState": true, + "windowsDBServer": false, + "associatedTimeMachineIds": null, + "accessKeyId": "ed3c5a82-c5c1-4728-85e1-d38cba63c107" +} +uuid: + description: created db server UUID + returned: always + type: str + sample: "be524e70-60ad-4a8c-a0ee-8d72f954d7e6" +""" +import time # noqa: E402 +from copy import deepcopy # noqa: E402 + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.db_server_vm import DBServerVM # noqa: E402 +from 
..module_utils.ndb.maintenance_window import ( # noqa: E402 + AutomatedPatchingSpec, + MaintenanceWindow, +) +from ..module_utils.ndb.operations import Operation # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + + +def get_module_spec(): + mutually_exclusive = [("name", "uuid")] + entity_by_spec = dict(name=dict(type="str"), uuid=dict(type="str")) + automated_patching = deepcopy( + AutomatedPatchingSpec.automated_patching_argument_spec + ) + + postgres = dict( + listener_port=dict(type="str", default="5432", required=False), + software_path=dict(type="str", required=True), + ) + module_args = dict( + ip=dict(type="str", required=True), + desc=dict(type="str", required=False), + reset_desc_in_ntnx_cluster=dict(type="bool", default=False, required=False), + cluster=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=True, + ), + postgres=dict(type="dict", options=postgres, required=False), + username=dict(type="str", required=True), + password=dict(type="str", required=False, no_log=True), + private_ssh_key=dict(type="str", required=False, no_log=True), + automated_patching=dict( + type="dict", options=automated_patching, required=False + ), + working_directory=dict(type="str", default="/tmp", required=False), + ) + return module_args + + +def get_register_spec(module, result): + db_server_vms = DBServerVM(module) + default_spec = db_server_vms.get_default_spec_for_registration() + spec, err = db_server_vms.get_spec(old_spec=default_spec, register_server=True) + if err: + result["error"] = err + err_msg = "Failed getting spec for db server vm registration" + module.fail_json(msg=err_msg, **result) + + # configure automated patching + if module.params.get("automated_patching"): + mw = MaintenanceWindow(module) + mw_spec, err = mw.get_spec(configure_automated_patching=True) + if err: + result["error"] = err + err_msg = "Failed getting spec for automated patching for db server vm" + module.fail_json(msg=err_msg, **result) + spec["maintenanceTasks"] = mw_spec + + if err: + result["error"] = err + err_msg = "Failed getting spec for db server vm registration" + module.fail_json(msg=err_msg, **result) + + # populate database engine related spec + spec, err = db_server_vms.get_db_engine_spec(spec, register=True) + if err: + result["error"] = err + err_msg = "Failed getting database engine related spec for database instance registration" + module.fail_json(msg=err_msg, **result) + + return spec + + +def register_db_server(module, result): + db_server_vms = DBServerVM(module) + + spec = get_register_spec(module, result) + + if module.check_mode: + result["response"] = spec + return + + resp = db_server_vms.register(data=spec) + result["response"] = resp + result["uuid"] = resp["entityId"] + db_uuid = resp["entityId"] + + if module.params.get("wait"): + ops_uuid = resp["operationId"] + operations = Operation(module) + time.sleep(5) # to get operation ID functional + operations.wait_for_completion(ops_uuid) + resp = db_server_vms.read(db_uuid) + result["response"] = resp + + result["changed"] = True + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[("state", "present", ("postgres",), True)], + mutually_exclusive=[("private_ssh_key", "password")], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None, "uuid": None} + register_db_server(module, result) + module.exit_json(**result) + + 
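Both registration flows above follow the same submit-then-poll contract: the create/register call returns an `operationId`, the module sleeps briefly, then polls `Operation.wait_for_completion`. A minimal sketch of that polling loop, assuming a generic fetch callable and NDB's numeric status strings (the `"5"` success code appears in the completed-operation samples below; treating `"4"` as failure is an assumption):

```python
# Illustrative only: the real implementation lives in module_utils/ndb/operations.py.
import time

SUCCESS, FAILURE = "5", "4"  # "5" matches the completed-operation samples; "4" is assumed


def wait_for_completion(fetch_operation, ops_uuid, delay=15, timeout=1800):
    """Poll an NDB operation until it reaches a terminal state or times out."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        op = fetch_operation(ops_uuid)  # e.g. a GET on the operations endpoint
        if op.get("status") in (SUCCESS, FAILURE):
            return op
        time.sleep(delay)
    raise TimeoutError("operation %s did not finish within %ss" % (ops_uuid, timeout))
```

The `time.sleep(5)` calls in the modules mirror the first step of this loop: they give NDB time to make the operation ID queryable before the first poll.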
+def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_replicate_database_snapshots.py b/plugins/modules/ntnx_ndb_replicate_database_snapshots.py new file mode 100644 index 000000000..b3f8203e5 --- /dev/null +++ b/plugins/modules/ntnx_ndb_replicate_database_snapshots.py @@ -0,0 +1,236 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_ndb_replicate_database_snapshots +short_description: module for replicating database snapshots across clusters of time machine +version_added: 1.8.0 +description: + - module for replicating snapshots across clusters of time machine + - replication to one cluster at a time is supported +options: + expiry_days: + description: + - expiry days for removal of snapshot + type: str + snapshot_uuid: + description: + - snapshot uuid + type: str + clusters: + description: + - cluster details where snapshot needs to be replicated + - currently, replication to only one cluster at once is supported + type: list + elements: dict + required: true + suboptions: + name: + description: + - cluster name + - Mutually exclusive with C(uuid) + type: str + uuid: + description: + - cluster UUID + - Mutually exclusive with C(name) + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_operations +author: + - Prem Karat (@premkarat) + - Pradeepsingh Bhati (@bhati-pradeep) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: replicate snapshot with expiry + ntnx_ndb_replicate_database_snapshots: + snapshot_uuid: "{{snapshot_uuid}}" + clusters: + - name: "{{cluster.cluster2.name}}" + expiry_days: 20 + register: result +""" +RETURN = r""" +response: + description: snapshot replication response + returned: always + type: dict + sample: { + "ancestorOpIds": null, + "childOperations": [], + "dateSubmitted": "2023-02-28 11:13:32", + "dbserverId": "e507345b-c7d1-469a-8297-3088f017f120", + "dbserverStatus": "UP", + "deferredBy": null, + "deferredByOpIds": null, + "endTime": "2023-02-28 11:14:15", + "entityId": "7a39664b-dfb7-4529-887c-6d91f7e18604", + "entityName": "test-setup-dnd_TM (DATABASE)", + "entityType": "ERA_TIME_MACHINE", + "id": "79d57f0b-4ce6-4a22-a28b-dbd1839c4d0a", + "instanceId": null, + "isInternal": false, + "message": null, + "metadata": { + "ancestorOpIds": null, + "associatedEntities": null, + "controlMessage": null, + "executionContext": { + "affectedDBServers": [ + "e507345b-c7d1-469a-8297-3088f017f120" + ], + "applicationType": "generic", + "extendedAffectedDBServers": [ + "e507345b-c7d1-469a-8297-3088f017f120" + ] + }, + "linkedOperations": null, + "linkedOperationsDescription": "Associated operation(s)", + "oldStatus": null, + "retriedOperations": null, + "retryImmediateParentId": null, + "retryParentId": null, + "rollbackForOperationId": null, + "rollbackOperation": false, + "scheduleTime": null, + "scheduledBy": "2ba56951-859a-4548-893e-9a8649362786", + "scheduledOn": "2023-02-28 11:13:31", + "switchedDbservers": null, + "userRequestedAction": "0", + "userRequestedActionTimestamp": null, + "workCompletionCallbackExecuted": true + }, + "name": "xxxxxxxxx", + "nxClusterId": "94c3e490-69e2-4144-83ff-68867e47889d", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "parentId": null, + "parentStep": 0, + 
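For the snapshot replication module, the `clusters` and `expiry_days` options are condensed into a small payload by `Snapshot.get_replicate_snapshot_spec()`. A rough sketch of what that payload could look like; the `nxClusterIds` and `lcmConfig` field names are assumptions for illustration, not confirmed API fields:

```python
# Hypothetical shape of the replicate-snapshot payload; field names are assumed.
def build_replicate_spec(cluster_uuids, expiry_days=None):
    spec = {"nxClusterIds": list(cluster_uuids)}
    if expiry_days is not None:
        # expiry_days arrives as a string from the module arguments
        spec["lcmConfig"] = {
            "snapshotLCMConfig": {"expiryDetails": {"expireInDays": int(expiry_days)}}
        }
    return spec


print(build_replicate_spec(["94c3e490-69e2-4144-83ff-68867e47889d"], "20"))
```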
"percentageComplete": "100", + "properties": [], + "scheduleTime": null, + "setStartTime": false, + "startTime": "2023-02-28 11:13:36", + "status": "5", + "stepGenEnabled": false, + "steps": [], + "systemTriggered": false, + "timeZone": "UTC", + "timeout": 1450, + "timeoutInfo": { + "timeRemaining": 1449, + "timeout": 1450, + "timeoutTimestamp": "2023-03-01 11:23:36", + "timezone": "UTC" + }, + "type": "replicate_snapshot", + "uniqueName": null, + "userRequestedAction": "0", + "userRequestedActionTime": null, + "userVisible": true, + "work": null + } +snapshot_uuid: + description: snapshot uuid + returned: always + type: str + sample: "be524e70-60ad-4a8c-a0ee-8d72f954d7e6" +""" +import time # noqa: E402 + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.operations import Operation # noqa: E402 +from ..module_utils.ndb.snapshots import Snapshot # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + +# Notes: +# 1. Snapshot replication to one cluster at a time is supported currently + + +def get_module_spec(): + mutually_exclusive = [("name", "uuid")] + entity_by_spec = dict(name=dict(type="str"), uuid=dict(type="str")) + + module_args = dict( + snapshot_uuid=dict(type="str", required=False), + clusters=dict( + type="list", + elements="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + required=True, + ), + expiry_days=dict(type="str", required=False), + ) + return module_args + + +def replicate_snapshot(module, result): + + snapshot_uuid = module.params.get("snapshot_uuid") + if not snapshot_uuid: + module.fail_json( + msg="snapshot_uuid is required field for replication", **result + ) + + _snapshot = Snapshot(module) + snapshot = _snapshot.read(uuid=snapshot_uuid) + time_machine_uuid = snapshot.get("timeMachineId") + + spec, err = _snapshot.get_replicate_snapshot_spec() + if err: + result["error"] = err + module.fail_json(msg="Failed generating snapshot create spec", **result) + + result["snapshot_uuid"] = snapshot_uuid + + if module.check_mode: + result["response"] = spec + return + + resp = _snapshot.replicate( + uuid=snapshot_uuid, time_machine_uuid=time_machine_uuid, data=spec + ) + result["response"] = resp + + if module.params.get("wait"): + ops_uuid = resp["operationId"] + operations = Operation(module) + time.sleep(3) + resp = operations.wait_for_completion(ops_uuid, delay=5) + result["response"] = resp + + result["changed"] = True + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("snapshot_uuid",), True), + ], + ) + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None, "snapshot_uuid": None} + + replicate_snapshot(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_slas.py b/plugins/modules/ntnx_ndb_slas.py new file mode 100644 index 000000000..eb04fa925 --- /dev/null +++ b/plugins/modules/ntnx_ndb_slas.py @@ -0,0 +1,254 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_ndb_slas +short_description: moudle for creating, updating and deleting slas +version_added: 
1.8.0 +description: moudle for creating, updating and deleting slas +options: + name: + description: + - sla name + type: str + desc: + description: + - sla description + type: str + sla_uuid: + description: + - sla uuid + type: str + frequency: + description: + - frequency of retention + type: dict + suboptions: + logs_retention: + description: + - duration in days for which the transaction logs are retained in NDB + type: int + snapshots_retention: + description: + - snapshot retention details + type: dict + suboptions: + daily: + description: + - duration in days for which a daily snapshot must be retained in NDB + type: int + weekly: + description: + - duration in weeks for which a weekly snapshot must be retained in NDB + type: int + monthly: + description: + - duration in months for which a monthly snapshot must be retained in NDB + type: int + quarterly: + description: + - duration in quarters for which a quarterly snapshot must be retained in NDB + type: int +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_operations +author: + - Prem Karat (@premkarat) + - Pradeepsingh Bhati (@bhati-pradeep) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: Verify creation of slas + ntnx_ndb_slas: + name: "{{profile1_name}}" + desc: "testdesc" + frequency: + logs_retention: "{{frequency.logs_retention}}" + snapshots_retention: + daily: "{{frequency.snapshots_retention.daily}}" + weekly: "{{frequency.snapshots_retention.weekly}}" + monthly: "{{frequency.snapshots_retention.monthly}}" + quarterly: "{{frequency.snapshots_retention.quarterly}}" + register: result +""" + +RETURN = r""" +response: + description: slas create response + returned: always + type: dict + sample: { + "id": "13d1dabe-d4aa-4ab5-b8c4-7adb4c41af03", + "name": "check", + "uniqueName": "CHECK", + "description": "check", + "ownerId": "eac70dbf-22fb-462b-9498-949796ca1f73", + "systemSla": false, + "dateCreated": null, + "dateModified": null, + "continuousRetention": 30, + "dailyRetention": 7, + "weeklyRetention": 2, + "monthlyRetention": 2, + "quarterlyRetention": 1, + "yearlyRetention": 0, + "referenceCount": 0, + "pitrEnabled": true, + "currentActiveFrequency": "CONTINUOUS" + } +sla_uuid: + description: sla uuid + returned: always + type: str + sample: "be524e70-60ad-4a8c-a0ee-8d72f954d7e6" +""" + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.slas import SLA # noqa: E402 +from ..module_utils.utils import ( # noqa: E402 + remove_param_with_none_value, + strip_extra_attrs, +) + + +def get_module_spec(): + + snapshot_retention = dict( + daily=dict(type="int"), + weekly=dict(type="int"), + monthly=dict(type="int"), + quarterly=dict(type="int"), + ) + frequency = dict( + logs_retention=dict(type="int"), + snapshots_retention=dict(type="dict", options=snapshot_retention), + ) + module_args = dict( + sla_uuid=dict(type="str"), + name=dict(type="str"), + desc=dict(type="str"), + frequency=dict(type="dict", options=frequency), + ) + return module_args + + +def create_sla(module, result): + sla = SLA(module) + + name = module.params["name"] + uuid = sla.get_uuid(value=name, raise_error=False) + if uuid: + module.fail_json(msg="sla with given name already exists", **result) + + spec, err = sla.get_spec() + if err: + result["error"] = err + module.fail_json(msg="Failed generating create sla spec", **result) + + if module.check_mode: + result["response"] = spec + return + + resp = sla.create(data=spec) + result["response"] = resp + 
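The nested `frequency` argument maps onto the flat retention fields visible in the RETURN sample above (`continuousRetention`, `dailyRetention`, and so on). A hedged sketch of that mapping, which in the module itself happens inside `SLA.get_spec`:

```python
# Sketch of the assumed frequency -> SLA field mapping; the field names come
# from the RETURN sample above, the mapping itself is illustrative.
def frequency_to_sla_fields(frequency):
    snaps = frequency.get("snapshots_retention", {})
    return {
        "continuousRetention": frequency.get("logs_retention"),
        "dailyRetention": snaps.get("daily"),
        "weeklyRetention": snaps.get("weekly"),
        "monthlyRetention": snaps.get("monthly"),
        "quarterlyRetention": snaps.get("quarterly"),
    }


# Reproduces the retention values shown in the sample response (30/7/2/2/1).
print(frequency_to_sla_fields(
    {"logs_retention": 30,
     "snapshots_retention": {"daily": 7, "weekly": 2, "monthly": 2, "quarterly": 1}}
))
```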
result["sla_uuid"] = resp["id"] + result["changed"] = True + + +def update_sla(module, result): + _sla = SLA(module) + + uuid = module.params.get("sla_uuid") + if not uuid: + module.fail_json(msg="uuid is required field for update", **result) + result["sla_uuid"] = uuid + + sla, err = _sla.get_sla(uuid=uuid) + if err: + module.fail_json(msg="Failed fetching sla info", **result) + + if sla.get("systemSla"): + module.fail_json(msg="system slas update is not allowed", **result) + + default_update_spec = _sla.get_default_update_spec() + strip_extra_attrs(sla, default_update_spec) + + spec, err = _sla.get_spec(old_spec=sla) + if err: + result["error"] = err + module.fail_json(msg="Failed generating update sla spec", **result) + + if module.check_mode: + result["response"] = spec + return + + if spec == sla: + result["skipped"] = True + module.exit_json(msg="Nothing to change.") + + resp = _sla.update(data=spec, uuid=uuid) + result["response"] = resp + result["sla_uuid"] = uuid + result["changed"] = True + + +def delete_sla(module, result): + _sla = SLA(module) + + uuid = module.params.get("sla_uuid") + if not uuid: + module.fail_json(msg="uuid is required field for delete", **result) + + sla, err = _sla.get_sla(uuid=uuid) + if err: + module.fail_json(msg="Failed fetching sla info", **result) + + if sla.get("systemSla"): + module.fail_json(msg="system slas delete is not allowed", **result) + + resp = _sla.delete(data=sla, uuid=uuid) + result["response"] = resp + if resp.get("status") != "success": + module.fail_json( + msg="sla delete failed", + response=resp, + ) + result["changed"] = True + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + required_if=[ + ("state", "present", ("name", "sla_uuid"), True), + ("state", "absent", ("sla_uuid",)), + ], + supports_check_mode=True, + ) + + remove_param_with_none_value(module.params) + result = {"changed": False, "error": None, "response": None, "sla_uuid": None} + if module.params["state"] == "present": + if module.params.get("sla_uuid"): + update_sla(module, result) + else: + create_sla(module, result) + else: + delete_sla(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_slas_info.py b/plugins/modules/ntnx_ndb_slas_info.py index 35bbbb102..b16f6b43c 100644 --- a/plugins/modules/ntnx_ndb_slas_info.py +++ b/plugins/modules/ntnx_ndb_slas_info.py @@ -11,7 +11,7 @@ --- module: ntnx_ndb_slas_info short_description: info module for ndb slas -version_added: 1.8.0-beta.1 +version_added: 1.8.0 description: 'Get sla info' options: name: @@ -20,10 +20,10 @@ type: str uuid: description: - - sla id + - sla uuid type: str extends_documentation_fragment: - - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_ndb_info_base_module author: - Prem Karat (@premkarat) - Gevorg Khachatryan (@Gevorg-Khachatryan-97) diff --git a/plugins/modules/ntnx_ndb_snapshots_info.py b/plugins/modules/ntnx_ndb_snapshots_info.py new file mode 100644 index 000000000..bd34bded9 --- /dev/null +++ b/plugins/modules/ntnx_ndb_snapshots_info.py @@ -0,0 +1,309 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_ndb_snapshots_info +short_description: info module for ndb snapshots info +version_added: 
1.8.0 +description: info module for ndb snapshots +options: + uuid: + description: + - snapshot uuid + type: str + get_files: + description: + - enable this flag to get snapshot files + type: bool + filters: + description: + - filters spec + type: dict + suboptions: + all: + description: + - include all snapshots from all clusters + type: bool + database_ids: + description: + - Comma separated list of database-ids for which snapshots are to be fetched + type: list + elements: str + value: + description: + - corresponding value for value type + type: str + value_type: + description: + - select value type for querying + type: str + choices: ["type","status","protection-domain-id","time-machine"] + time_zone: + description: + - timezone related to C(pitr_timestamp) + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_info_base_module +author: + - Prem Karat (@premkarat) + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" +EXAMPLES = r""" +- name: List era snapshots + ntnx_ndb_snapshots_info: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + validate_certs: false + register: snapshots + +- name: get era snapshots using it's id + ntnx_ndb_snapshots_info: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + validate_certs: false + uuid: "" + register: result + +- name: get era snapshot files using it's id + ntnx_ndb_snapshots_info: + nutanix_host: "" + nutanix_username: "" + nutanix_password: "" + validate_certs: false + uuid: "" + get_files: true + register: result + +""" +RETURN = r""" +response: + description: listing all db servers + returned: always + type: list + sample: [ +{ + "accessLevel": null, + "appInfoVersion": "2", + "applicableTypes": [ + "CONTINUOUS_EXTRA" + ], + "databaseNodeId": "000-0000-0000-0000", + "databaseSnapshot": false, + "dateCreated": "2022-11-29 09:21:26", + "dateModified": "2022-12-01 06:22:22", + "dbServerStorageMetadataVersion": 1, + "dbserverId": null, + "dbserverIp": null, + "dbserverName": null, + "description": null, + "fromTimeStamp": "2022-11-29 09:20:52", + "id": "000-0000-0000-0000", + "info": { + "databaseGroupId": null, + "databases": null, + "info": null, + "linkedDatabases": [ + { + "appConsistent": false, + "clone": false, + "databaseName": "template0", + "id": "000-0000-0000-0000", + "info": { + "info": { + "created_by": "system" + } + }, + "message": null, + "status": "READY" + }, + { + "appConsistent": false, + "clone": false, + "databaseName": "postgres", + "id": "000-0000-0000-0000", + "info": { + "info": { + "created_by": "system" + } + }, + "message": null, + "status": "READY" + }, + { + "appConsistent": false, + "clone": false, + "databaseName": "template1", + "id": "000-0000-0000-0000", + "info": { + "info": { + "created_by": "system" + } + }, + "message": null, + "status": "READY" + }, + { + "appConsistent": false, + "clone": false, + "databaseName": "testdb1", + "id": "000-0000-0000-0000", + "info": { + "info": { + "created_by": "user" + } + }, + "message": null, + "status": "READY" + } + ], + "missingDatabases": null, + "replicationHistory": null, + "secureInfo": null + }, + "lcmConfig": null, + "metadata": { + "async": false, + "curationRetryCount": 0, + "deregisterInfo": null, + "fromTimeStamp": "2022-11-29 09:20:52", + "info": null, + "lastReplicationRetrySourceSnapshotId": null, + "lastReplicationRetryTimestamp": null, + "operationsUsingSnapshot": [], + "replicationRetryCount": 0, + "secureInfo": null, + "standby": false, + "toTimeStamp": "2022-11-29 
09:20:52" + }, + "metric": { + "lastUpdatedTimeInUTC": null, + "storage": { + "allocatedSize": 0.0, + "controllerAvgIoLatencyUsecs": null, + "controllerNumIops": null, + "lastUpdatedTimeInUTC": null, + "size": 67207168.0, + "unit": "B", + "usedSize": 0.0 + } + }, + "name": "era_auto_snapshot", + "nxClusterId": "000-0000-0000-0000", + "ownerId": "000-0000-0000-0000", + "parentSnapshot": true, + "parentSnapshotId": null, + "processed": false, + "properties": [], + "protectionDomainId": "000-0000-0000-0000", + "replicatedSnapshots": null, + "sanitised": false, + "sanitisedFromSnapshotId": null, + "sanitisedSnapshots": null, + "snapshotFamily": null, + "snapshotId": "53553", + "snapshotSize": 67207168.0, + "snapshotTimeStamp": "2022-11-29 09:20:52", + "snapshotTimeStampDate": 1669713652000, + "snapshotUuid": "53553", + "softwareDatabaseSnapshot": false, + "softwareSnapshot": null, + "softwareSnapshotId": "000-0000-0000-0000", + "status": "ACTIVE", + "tags": [], + "timeMachineId": "000-0000-0000-0000", + "timeZone": "UTC", + "toTimeStamp": "2022-11-29 09:20:52", + "type": "CONTINUOUS_EXTRA" + }, + ] +""" + +from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from ..module_utils.ndb.snapshots import Snapshot # noqa: E402 +from ..module_utils.utils import format_filters_map # noqa: E402 + + +def get_module_spec(): + + filters_spec = dict( + all=dict(type="bool"), + database_ids=dict(type="list", elements="str"), + value=dict(type="str"), + value_type=dict( + type="str", + choices=[ + "type", + "status", + "protection-domain-id", + "time-machine", + ], + ), + time_zone=dict(type="str"), + ) + + module_args = dict( + uuid=dict(type="str"), + get_files=dict(type="bool"), + filters=dict( + type="dict", + options=filters_spec, + ), + ) + + return module_args + + +def get_snapshot(module, result): + snapshot = Snapshot(module) + uuid = module.params["uuid"] + get_files = module.params["get_files"] + if get_files: + resp = snapshot.get_snapshot_files(uuid=uuid) + else: + resp = snapshot.read(uuid=uuid) + + result["response"] = resp + result["snapshot_uuid"] = uuid + + +def get_snapshots(module, result): + snapshot = Snapshot(module) + query_params = module.params.get("filters") + query_params = format_filters_map(query_params) + + resp = snapshot.get_snapshots(query_params=query_params) + + result["response"] = resp + + +def run_module(): + module = NdbBaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + required_by={"get_files": "uuid"}, + mutually_exclusive=[("uuid", "filters")], + ) + result = {"changed": False, "error": None, "response": None} + if module.params.get("uuid"): + get_snapshot(module, result) + else: + get_snapshots(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_stretched_vlans.py b/plugins/modules/ntnx_ndb_stretched_vlans.py new file mode 100644 index 000000000..bebead6d0 --- /dev/null +++ b/plugins/modules/ntnx_ndb_stretched_vlans.py @@ -0,0 +1,240 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_ndb_stretched_vlans +short_description: Module for create, update and delete of stretched vlan. 
+version_added: 1.8.0 +description: Module for create, update and delete of stretched vlan. +options: + stretched_vlan_uuid: + description: + - uuid for update or delete of stretched vlan + type: str + vlans: + description: + - list of vlans uuids + type: list + elements: str + name: + description: + - name of stretched vlan + - update allowed + type: str + desc: + description: + - description of stretched vlan + type: str + gateway: + description: + - The gateway ip address + type: str + subnet_mask: + description: + - Subnet network address + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_operations +author: + - Prem Karat (@premkarat) + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" + - name: create ndb stretched vlan + ntnx_ndb_stretched_vlans: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + name: test-vlan-name + vlans: + - "00000000-0000-0000-0000-000000000000" + - "00000000-0000-0000-0000-000000000000" + register: result + + - name: update ndb stretched vlan + ntnx_ndb_stretched_vlans: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + stretched_vlan_uuid: "" + vlans: + - "00000000-0000-0000-0000-000000000000" + - "00000000-0000-0000-0000-000000000000" + register: result + + - name: Delete stretched vlan + ntnx_ndb_stretched_vlans: + nutanix_host: "" + nutanix_username: + nutanix_password: + validate_certs: false + state: absent + stretched_vlan_uuid: "" + register: result + +""" + +RETURN = r""" +response: + description: stretched vlan creation response after provisioning + returned: always + type: dict + sample: {} +stretched_vlan_uuid: + description: created stretched vlan UUID + returned: always + type: str + sample: "00000-0000-000-0000-000000" +name: + description: stretched vlan name + returned: always + type: str + sample: "test-name" +""" + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.stretched_vlans import StretchedVLAN # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + + +def get_module_spec(): + + module_args = dict( + name=dict(type="str"), + desc=dict(type="str"), + stretched_vlan_uuid=dict(type="str"), + gateway=dict(type="str"), + subnet_mask=dict(type="str"), + vlans=dict(type="list", elements="str"), + ) + return module_args + + +def create_stretched_vlan(module, result): + stretched_vlan = StretchedVLAN(module) + + spec, err = stretched_vlan.get_spec() + if err: + result["error"] = err + module.fail_json(msg="Failed generating create stretched vlan spec", **result) + + if module.check_mode: + result["response"] = spec + return + + resp = stretched_vlan.create_stretched_vlan(data=spec) + + result["response"] = resp + stretched_vlan_uuid = resp["id"] + result["stretched_vlan_uuid"] = stretched_vlan_uuid + result["changed"] = True + + +def update_stretched_vlan(module, result): + stretched_vlan = StretchedVLAN(module) + + uuid = module.params.get("stretched_vlan_uuid") + if not uuid: + module.fail_json( + msg="stretched_vlan_uuid is required field for update", **result + ) + resp, err = stretched_vlan.get_stretched_vlan(uuid=uuid) + if err: + result["error"] = err + module.fail_json(msg="Failed generating update stretched vlan spec", **result) + + old_spec = stretched_vlan.get_default_update_spec(override_spec=resp) + + update_spec, err = stretched_vlan.get_spec(old_spec=old_spec) + if err: + result["error"] 
= err + module.fail_json(msg="Failed generating update stretched vlan spec", **result) + + if module.check_mode: + result["response"] = update_spec + return + + if check_for_idempotency(old_spec, update_spec): + result["skipped"] = True + module.exit_json(msg="Nothing to change.") + + resp = stretched_vlan.update_stretched_vlan(data=update_spec, uuid=uuid) + + result["response"] = resp + result["stretched_vlan_uuid"] = uuid + result["changed"] = True + + +def delete_stretched_vlan(module, result): + stretched_vlan = StretchedVLAN(module) + + uuid = module.params.get("stretched_vlan_uuid") + if not uuid: + module.fail_json( + msg="stretched_vlan_uuid is required field for delete", **result + ) + + resp = stretched_vlan.delete_stretched_vlan(uuid) + + result["response"] = resp + result["changed"] = True + + +def check_for_idempotency(old_spec, update_spec): + + for key, value in update_spec.items(): + if old_spec.get(key) != value: + return False + + return True + + +def run_module(): + + module = NdbBaseModule( + argument_spec=get_module_spec(), + required_if=[ + ("state", "present", ("name", "stretched_vlan_uuid"), True), + ("state", "absent", ("stretched_vlan_uuid",)), + ], + required_by={ + "gateway": "stretched_vlan_uuid", + "subnet_mask": "stretched_vlan_uuid", + }, + supports_check_mode=True, + ) + remove_param_with_none_value(module.params) + result = { + "changed": False, + "error": None, + "response": None, + "stretched_vlan_uuid": None, + } + if module.params["state"] == "present": + if module.params.get("stretched_vlan_uuid"): + update_stretched_vlan(module, result) + else: + create_stretched_vlan(module, result) + else: + delete_stretched_vlan(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_tags.py b/plugins/modules/ntnx_ndb_tags.py new file mode 100644 index 000000000..4c473805c --- /dev/null +++ b/plugins/modules/ntnx_ndb_tags.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_ndb_tags +short_description: module for create, update and delete of tags +version_added: 1.8.0 +description: module for create, update and delete of tags +options: + name: + description: + - name of tag + - update is allowed + type: str + uuid: + description: + - uuid of tag for update and delete + type: str + desc: + description: + - description of tag + - allowed to update + type: str + entity_type: + description: + - entity type it needs to be associated + type: str + choices: ["DATABASE", "CLONE", "TIME_MACHINE", "DATABASE_SERVER"] + tag_value_required: + description: + - if value is required by user while adding this tag to entity + - update is allowed + type: bool + status: + description: + - status of tag + - update is allowed + type: str + choices: ["ENABLED", "DEPRECATED"] +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_operations +author: + - Prem Karat (@premkarat) + - Pradeepsingh Bhati (@bhati-pradeep) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: create tags for clone + ntnx_ndb_tags: + name: "{{tag_name}}-clone" + desc: tag-created-by-ansible + tag_value_required: False + entity_type: CLONE + register: result + +- name: create tags for time machine + 
ntnx_ndb_tags:
+    name: "{{tag_name}}-tm"
+    desc: tag-created-by-ansible
+    tag_value_required: True
+    entity_type: TIME_MACHINE
+  register: result
+
+- name: create tags for database server
+  ntnx_ndb_tags:
+    name: "{{tag_name}}-database-server"
+    desc: tag-created-by-ansible
+    tag_value_required: True
+    entity_type: DATABASE_SERVER
+  register: result
+"""
+RETURN = r"""
+response:
+  description: tag intent response
+  returned: always
+  type: dict
+  sample: {
+            "description": "check123",
+            "entityType": "CLONE",
+            "name": "tag-ansible-updated",
+            "owner": "eac70dbf-22fb-462b-9498-949796ca1f73",
+            "required": true,
+            "status": "ENABLED",
+            "values": 0
+        }
+uuid:
+  description: tag uuid
+  returned: always
+  type: str
+  sample: "be524e70-60ad-4a8c-a0ee-8d72f954d7e6"
+
+"""
+
+from ..module_utils.ndb.base_module import NdbBaseModule  # noqa: E402
+from ..module_utils.ndb.tags import Tag  # noqa: E402
+from ..module_utils.utils import strip_extra_attrs  # noqa: E402
+
+
+def get_module_spec():
+
+    module_args = dict(
+        name=dict(type="str", required=False),
+        uuid=dict(type="str", required=False),
+        desc=dict(type="str", required=False),
+        entity_type=dict(
+            type="str",
+            choices=["DATABASE", "CLONE", "TIME_MACHINE", "DATABASE_SERVER"],
+            required=False,
+        ),
+        tag_value_required=dict(type="bool", required=False),
+        status=dict(
+            type="str", choices=["ENABLED", "DEPRECATED"], required=False
+        ),  # deprecating a tag disallows adding it to further entities
+    )
+    return module_args
+
+
+def create_tags(module, result):
+    tags = Tag(module)
+
+    spec, err = tags.get_spec()
+    if err:
+        result["error"] = err
+        return module.fail_json(msg="Failed generating tag create spec", **result)
+
+    if module.check_mode:
+        result["response"] = spec
+        return
+
+    resp = tags.create(data=spec)
+    result["response"] = resp
+
+    # get uuid
+    uuid, err = tags.get_tag_uuid(name=resp["name"], entity_type=resp["entityType"])
+    if err:
+        result["error"] = err
+        return module.fail_json(msg="Failed fetching tag uuid post creation", **result)
+
+    result["uuid"] = uuid
+
+    result["changed"] = True
+
+
+def check_tags_idempotency(old_spec, new_spec):
+
+    # spec args allowed to be updated
+    args = ["name", "description", "required", "status"]
+
+    for arg in args:
+        if old_spec.get(arg) != new_spec.get(arg):
+            return False
+
+    return True
+
+
+def update_tags(module, result):
+    tags = Tag(module)
+    uuid = module.params.get("uuid")
+    if not uuid:
+        module.fail_json(msg="'uuid' is required field for update", **result)
+
+    tag = tags.read(uuid=uuid)
+    if not tag:
+        module.fail_json(msg="Failed fetching tag info", **result)
+
+    default_spec = tags.get_default_update_spec()
+    strip_extra_attrs(tag, default_spec)
+    spec, err = tags.get_spec(old_spec=tag)
+    if err:
+        result["error"] = err
+        return module.fail_json(msg="Failed generating tag update spec", **result)
+
+    if module.check_mode:
+        result["response"] = spec
+        return
+
+    if check_tags_idempotency(old_spec=tag, new_spec=spec):
+        result["skipped"] = True
+        module.exit_json(msg="Nothing to change.")
+
+    resp = tags.update(uuid=uuid, data=spec)
+    result["response"] = resp
+    result["changed"] = True
+    result["uuid"] = uuid
+
+
+def delete_tags(module, result):
+    tags = Tag(module)
+    uuid = module.params.get("uuid")
+    if not uuid:
+        module.fail_json(msg="'uuid' is required field for delete", **result)
+
+    resp = tags.delete(uuid=uuid)
+    result["response"] = resp
+    result["changed"] = True
+
+
+def run_module():
+    module = NdbBaseModule(
+        argument_spec=get_module_spec(),
+        supports_check_mode=True,
+
required_if=[("state", "present", ("name", "uuid"), True)], + required_by={ + "status": "uuid", + }, + mutually_exclusive=[("uuid", "entity_type")], + ) + result = {"changed": False, "error": None, "response": None, "uuid": None} + if module.params.get("state", "present") == "present": + if module.params.get("uuid"): + update_tags(module, result) + else: + create_tags(module, result) + else: + delete_tags(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_time_machine_clusters.py b/plugins/modules/ntnx_ndb_time_machine_clusters.py new file mode 100644 index 000000000..576abf9f5 --- /dev/null +++ b/plugins/modules/ntnx_ndb_time_machine_clusters.py @@ -0,0 +1,340 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_ndb_time_machine_clusters +short_description: Module for create, update and delete for data access management in time machines. +version_added: 1.8.0 +description: + - Module for create, update and delete for data access management i.e. clusters for snapshots in time machines. + - Currently only postgres type database is tested and supported +options: + time_machine_uuid: + description: UUID of the time machine + type: str + required: true + cluster: + description: + - Name or UUID of the cluster + - if cluster is not present in time machine, then it will add it + - if cluster is present then it will update cluster config in time machine + type: dict + suboptions: + name: + description: + - Cluster Name + - Mutually exclusive with C(uuid) + type: str + uuid: + description: + - Cluster UUID + - Mutually exclusive with C(name) + type: str + type: + description: + - type of data access instance + - update allowed + type: str + default: "OTHER" + choices: ["OTHER", "PRIMARY"] + sla: + description: + - Name or UUID of the sla + - Update allowed + type: dict + suboptions: + name: + description: + - Sla Name + - Mutually exclusive with C(uuid) + type: str + uuid: + description: + - Sla UUID + - Mutually exclusive with C(name) + type: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_base_module + - nutanix.ncp.ntnx_operations +author: + - Prem Karat (@premkarat) + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" + +- name: create data access instance with cluster name and sla name + ntnx_ndb_time_machine_clusters: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + time_machine_uuid: "" + cluster: + name: "" + sla: + name: "" + register: out + +- name: update data access instance with new sla name + ntnx_ndb_time_machine_clusters: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + time_machine_uuid: "" + cluster: + name: "" + sla: + name: "" + register: result + +- name: delete time machine + ntnx_ndb_time_machine_clusters: + nutanix_host: + nutanix_username: + nutanix_password: + validate_certs: false + state: absent + time_machine_uuid: "" + cluster: + uuid: "" + register: result + +""" + +RETURN = r""" +response: + description: An intentful representation of a TM status + returned: always + type: dict + sample: { + "dateCreated": "2023-01-22 08:03:46", + "dateModified": "2023-01-22 08:03:46", + 
"description": null, + "forceVGBasedLogDrive": false, + "info": null, + "logDrive": null, + "logDriveId": null, + "logDriveStatus": "NOT_INITIALIZED", + "metadata": null, + "nxCluster": null, + "nxClusterId": "0000000-000000-00000-0000", + "ownerId": "0000000-000000-00000-0000", + "resetDescription": false, + "resetSlaId": false, + "resetType": false, + "schedule": null, + "scheduleId": "0000000-000000-00000-0000", + "sla": null, + "slaId": "0000000-000000-00000-0000", + "source": false, + "sourceClusters": null, + "status": "ACTIVE", + "storageResourceId": null, + "submitActivateTimeMachineOp": false, + "timeMachineId": "0000000-000000-00000-0000", + "type": "OTHER", + "updateOperationSummary": null + } +time_machine_uuid: + description: created data access instance UUID + returned: always + type: str + sample: "0000000-000000-00000-0000" +""" + +import time # noqa: E402 + +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.operations import Operation # noqa: E402 +from ..module_utils.ndb.time_machines import TimeMachine, get_cluster_uuid # noqa: E402 + + +def get_module_spec(): + mutually_exclusive = [("name", "uuid")] + entity_by_spec = dict(name=dict(type="str"), uuid=dict(type="str")) + module_args = dict( + time_machine_uuid=dict(type="str", required=True), + cluster=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + ), + type=dict( + type="str", required=False, default="OTHER", choices=["OTHER", "PRIMARY"] + ), + sla=dict( + type="dict", + options=entity_by_spec, + mutually_exclusive=mutually_exclusive, + ), + ) + return module_args + + +def create_data_access_instance(module, result): + tm = TimeMachine(module) + tm_uuid = module.params["time_machine_uuid"] + if not module.params.get("cluster"): + module.fail_json(msg="cluster is required field", **result) + + cluster_uuid, err = get_cluster_uuid(module, module.params["cluster"]) + if err: + result["error"] = err + err_msg = "'cluster' is required field for adding cluster in time machine" + module.fail_json(msg=err_msg, **result) + + cluster_in_time_machine = tm.check_if_cluster_exists(tm_uuid, cluster_uuid) + + # if cluster exist in time machine trigger update flow + if cluster_in_time_machine: + update_data_access_instance(module, result) + return + + spec, err = tm.get_data_access_management_spec() + if err: + result["error"] = err + module.fail_json( + msg="Failed generating spec for adding cluster in time machine", **result + ) + result["time_machine_uuid"] = tm_uuid + + if module.check_mode: + result["response"] = spec + return + + resp = tm.create_data_access_instance(tm_uuid, spec) + + if ( + module.params.get("wait") + and resp.get("updateOperationSummary") + and resp["updateOperationSummary"].get("operationId") + ): + ops_uuid = resp["updateOperationSummary"]["operationId"] + operations = Operation(module) + time.sleep(5) # wait for ops ID for functional + operations.wait_for_completion(ops_uuid) + resp = tm.read_data_access_instance(tm_uuid, cluster_uuid) + result["response"] = resp + + result["response"] = resp + result["cluster_uuid"] = resp["nxClusterId"] + result["changed"] = True + + +def check_for_idempotency(old_spec, update_spec): + attrs = ["slaId", "type"] + for attr in attrs: + if old_spec.get(attr) != update_spec.get(attr): + return False + return True + + +def update_data_access_instance(module, result): + tm = TimeMachine(module) + + tm_uuid = module.params["time_machine_uuid"] + if not module.params.get("cluster"): + 
module.fail_json(msg="'cluster' is required field for update", **result) + + cluster_uuid, err = get_cluster_uuid(module, module.params["cluster"]) + if err: + result["error"] = err + module.fail_json( + msg="Failed generating spec for updating cluster in time machine", **result + ) + + result["time_machine_uuid"] = tm_uuid + + resp = tm.read_data_access_instance(tm_uuid, cluster_uuid) + + old_spec = tm.get_default_data_access_management_spec(override_spec=resp) + + spec, err = tm.get_data_access_management_spec(old_spec=old_spec) + if err: + result["error"] = err + module.fail_json( + msg="Failed generating spec for updating cluster in time machine", **result + ) + + if module.check_mode: + result["response"] = spec + return + + if check_for_idempotency(old_spec, spec): + result["skipped"] = True + module.exit_json(msg="Nothing to change.") + + resp = tm.update_data_access_instance( + data=spec, tm_uuid=tm_uuid, cluster_uuid=cluster_uuid + ) + + if ( + module.params.get("wait") + and resp.get("updateOperationSummary") + and resp["updateOperationSummary"].get("operationId") + ): + ops_uuid = resp["updateOperationSummary"]["operationId"] + operations = Operation(module) + operations.wait_for_completion(ops_uuid) + + resp = tm.read_data_access_instance(tm_uuid, cluster_uuid) + result["response"] = resp + + result["cluster_uuid"] = cluster_uuid + result["changed"] = True + + +def delete_data_access_instance(module, result): + tm = TimeMachine(module) + + tm_uuid = module.params["time_machine_uuid"] + if not module.params.get("cluster"): + module.fail_json(msg="cluster is required field", **result) + cluster_uuid, err = get_cluster_uuid(module, module.params["cluster"]) + if err: + result["error"] = err + err_msg = "'cluster' is required field for removing cluster from time machine" + module.fail_json(msg=err_msg, **result) + resp = tm.delete_data_access_instance(tm_uuid, cluster_uuid) + + result["response"] = resp + result["changed"] = True + + +def run_module(): + module = NdbBaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[ + ("state", "present", ("sla",)), + ], + ) + result = { + "changed": False, + "error": None, + "response": None, + "time_machine_uuid": None, + } + if module.params["state"] == "present": + create_data_access_instance(module, result) + else: + delete_data_access_instance(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_time_machines_info.py b/plugins/modules/ntnx_ndb_time_machines_info.py index 3fcc9a15c..f3d9dc861 100644 --- a/plugins/modules/ntnx_ndb_time_machines_info.py +++ b/plugins/modules/ntnx_ndb_time_machines_info.py @@ -11,8 +11,8 @@ --- module: ntnx_ndb_time_machines_info short_description: info module for ndb time machines -version_added: 1.8.0-beta.1 -description: 'Get tm info' +version_added: 1.8.0 +description: Get time machine info options: name: description: @@ -20,10 +20,48 @@ type: str uuid: description: - - time machine id + - time machine uuid type: str + filters: + description: + - filters + type: dict + suboptions: + detailed: + description: + - get detailed response + type: bool + load_clones: + description: + - load clones + type: bool + load_database: + description: + - load database details in response + type: bool + clone_tms: + description: + - load cloned time machines + type: bool + database_tms: + description: + - write + type: bool + value: + description: + - value correponding to 
C(value_type)
+        type: str
+      value_type:
+        description:
+          - value type corresponding to C(value)
+        type: str
+        choices: ["id", "name"]
+      time_zone:
+        description:
+          - get responses in certain time zone
+        type: str
 extends_documentation_fragment:
-      - nutanix.ncp.ntnx_ndb_base_module
+      - nutanix.ncp.ntnx_ndb_info_base_module
 author:
  - Prem Karat (@premkarat)
  - Gevorg Khachatryan (@Gevorg-Khachatryan-97)
@@ -224,34 +262,62 @@
 from ..module_utils.ndb.base_info_module import NdbBaseInfoModule  # noqa: E402
 from ..module_utils.ndb.time_machines import TimeMachine  # noqa: E402
+from ..module_utils.utils import format_filters_map  # noqa: E402
 
 
 def get_module_spec():
+    filters_spec = dict(
+        detailed=dict(type="bool"),
+        load_clones=dict(type="bool"),
+        load_database=dict(type="bool"),
+        clone_tms=dict(type="bool"),
+        database_tms=dict(type="bool"),
+        value=dict(type="str"),
+        value_type=dict(
+            type="str",
+            choices=[
+                "id",
+                "name",
+            ],
+        ),
+        time_zone=dict(type="str"),
+    )
+
     module_args = dict(
         name=dict(type="str"),
         uuid=dict(type="str"),
+        filters=dict(
+            type="dict",
+            options=filters_spec,
+        ),
     )
     return module_args
 
 
-def get_tm(module, result):
+def get_time_machine(module, result):
     tm = TimeMachine(module)
     uuid = module.params.get("uuid")
     name = module.params.get("name")
-    resp, err = tm.get_time_machine(uuid=uuid, name=name)
+    query_params = module.params.get("filters")
+    query_params = format_filters_map(query_params)
+
+    resp, err = tm.get_time_machine(uuid=uuid, name=name, query=query_params)
     if err:
         result["error"] = err
         module.fail_json(msg="Failed fetching time machine info", **result)
     result["response"] = resp
 
 
-def get_tms(module, result):
+def get_time_machines(module, result):
     tm = TimeMachine(module)
-    resp = tm.read()
+    query_params = module.params.get("filters")
+    query_params = format_filters_map(query_params)
+
+    resp = tm.read(query=query_params)
     result["response"] = resp
 
@@ -264,9 +330,9 @@ def run_module():
     )
     result = {"changed": False, "error": None, "response": None}
     if module.params.get("name") or module.params.get("uuid"):
-        get_tm(module, result)
+        get_time_machine(module, result)
     else:
-        get_tms(module, result)
+        get_time_machines(module, result)
     module.exit_json(**result)
diff --git a/plugins/modules/ntnx_ndb_vlans.py b/plugins/modules/ntnx_ndb_vlans.py
new file mode 100644
index 000000000..bd978ce57
--- /dev/null
+++ b/plugins/modules/ntnx_ndb_vlans.py
@@ -0,0 +1,421 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Prem Karat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: ntnx_ndb_vlans
+short_description: Module for create, update and delete of ndb vlan.
+
+EXAMPLES = r"""
+- name: create DHCP ndb vlan
+  ntnx_ndb_vlans:
+    nutanix_host:
+    nutanix_username:
+    nutanix_password:
+    validate_certs: false
+    name: test-vlan-name
+    vlan_type: DHCP
+    cluster:
+      uuid: ""
+  register: result
+
+- name: create static ndb vlan
+  ntnx_ndb_vlans:
+    nutanix_host:
+    nutanix_username:
+    nutanix_password:
+    validate_certs: false
+    name: test-vlan-name
+    vlan_type: Static
+    cluster:
+      uuid: ""
+    gateway: ""
+    subnet_mask: ""
+    ip_pools:
+      -
+        start_ip: ""
+        end_ip: ""
+      -
+        start_ip: ""
+        end_ip: ""
+    primary_dns: ""
+    secondary_dns: ""
+    dns_domain: ""
+  register: result
+
+- name: update ndb vlan type
+  ntnx_ndb_vlans:
+    nutanix_host:
+    nutanix_username:
+    nutanix_password:
+    validate_certs: false
+    vlan_uuid: ""
+    vlan_type: DHCP
+  register: result
+
+- name: Delete vlan
+  ntnx_ndb_vlans:
+    nutanix_host:
+    nutanix_username:
+    nutanix_password:
+    validate_certs: false
+    state: absent
+    vlan_uuid: ""
+  register: result
+
+"""
+
+RETURN = r"""
+response:
+  description: vlan creation response after provisioning
+  returned: always
+  type: dict
+  sample: {}
+vlan_uuid:
+  description: created vlan UUID
+  returned: always
+  type: str
+  sample: "00000-0000-000-0000-000000"
+clusterId:
+  description: Cluster ID
+  returned: always
+  type: str
+  sample: "00000-0000-000-0000-000000"
+name:
+  description: vlan name
+  returned: always
+  type: str
+  sample: "test-name"
+type:
+  description: vlan type, Static or DHCP
+  returned: always
+  type: str
+  sample: "Static"
+managed:
+  description: whether the vlan is managed or unmanaged
+  returned: always
+  type: bool
+
+propertiesMap:
+  description: configuration of the static vlan
+  type: dict
+  returned: always
+  sample:
+    {
+        "VLAN_DNS_DOMAIN": "0.0.0.0",
+        "VLAN_GATEWAY": "0.0.0.0",
+        "VLAN_PRIMARY_DNS": "0.0.0.0",
+        "VLAN_SECONDARY_DNS": "0.0.0.0",
+        "VLAN_SUBNET_MASK": "0.0.0.0",
+    }
+ipPools:
+  description: Range of IPs
+  type: list
+  returned: always
+  sample:
+    [
+        {
+            "endIP": "0.0.0.0",
+            "id": "000000-00000-000000-0000",
+            "ipAddresses": [
+                {
+                    "ip": "0.0.0.0",
+                    "status": "Available"
+                },
+                {
+                    "ip": "0.0.0.0",
+                    "status": "Available"
+                },
+                {
+                    "ip": "0.0.0.0",
+                    "status": "Available"
+                },
+            ],
+            "modifiedBy": "000000-00000-000000-0000",
+            "startIP": "0.0.0.0"
+        }
+    ]
+
+properties:
+  description: list of configuration properties of the static vlan
+  type: list
+  returned: always
+  sample:
+    [
+        {
+            "name": "VLAN_DNS_DOMAIN",
+            "secure": false,
+            "value": "0.0.0.0"
+        },
+        {
+            "name": "VLAN_GATEWAY",
+            "secure": false,
+            "value": "0.0.0.0"
+        },
+        {
+            "name": "VLAN_PRIMARY_DNS",
+            "secure": false,
+            "value": "0.0.0.0"
+        },
+        {
+            "name": "VLAN_SECONDARY_DNS",
+            "secure": false,
+            "value": "0.0.0.0"
+        },
+        {
+            "name": "VLAN_SUBNET_MASK",
+            "secure": false,
+            "value": "0.0.0.0"
+        }
+    ]
+"""
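
Note the naming gap the RETURN samples above make visible: the module's C(ip_pools) option uses snake_case C(start_ip)/C(end_ip), while the NDB API payload uses camelCase C(startIP)/C(endIP). A minimal sketch of that translation, assuming a straightforward key rename is all that happens:

```python
def to_api_ip_pools(ip_pools):
    """Map module-style ip_pools entries to the NDB API's startIP/endIP shape."""
    return [{"startIP": pool["start_ip"], "endIP": pool["end_ip"]} for pool in ip_pools or []]


# to_api_ip_pools([{"start_ip": "10.0.0.10", "end_ip": "10.0.0.20"}])
# -> [{"startIP": "10.0.0.10", "endIP": "10.0.0.20"}]
```
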
"0.0.0.0", + } +ipPools: + description: Range of ip's + type: list + returned: always + sample: + [ + { + "endIP": "0.0.0.0", + "id": "000000-00000-000000-0000", + "ipAddresses": [ + { + "ip": "0.0.0.0", + "status": "Available" + }, + { + "ip": "0.0.0.0", + "status": "Available" + }, + { + "ip": "0.0.0.0", + "status": "Available" + }, + ], + "modifiedBy": "000000-00000-000000-0000", + "startIP": "0.0.0.0" + } + ] + +properties: + description: list of confiuration of static vlan + type: list + returned: always + sample: + [ + { + "name": "VLAN_DNS_DOMAIN", + "secure": false, + "value": "0.0.0.0" + }, + { + "name": "VLAN_GATEWAY", + "secure": false, + "value": "0.0.0.0" + }, + { + "name": "VLAN_PRIMARY_DNS", + "secure": false, + "value": "0.0.0.0" + }, + { + "name": "VLAN_SECONDARY_DNS", + "secure": false, + "value": "0.0.0.0" + }, + { + "name": "VLAN_SUBNET_MASK", + "secure": false, + "value": "0.0.0.0" + } + ] +""" +from ..module_utils.ndb.base_module import NdbBaseModule # noqa: E402 +from ..module_utils.ndb.vlans import VLAN # noqa: E402 +from ..module_utils.utils import remove_param_with_none_value # noqa: E402 + + +def get_module_spec(): + mutually_exclusive = [("name", "uuid")] + entity_by_spec = dict(name=dict(type="str"), uuid=dict(type="str")) + ip_pool_spec = dict(start_ip=dict(type="str"), end_ip=dict(type="str")) + + module_args = dict( + name=dict(type="str"), + vlan_type=dict(type="str", choices=["DHCP", "Static"]), + vlan_uuid=dict(type="str"), + cluster=dict( + type="dict", options=entity_by_spec, mutually_exclusive=mutually_exclusive + ), + ip_pools=dict( + type="list", + elements="dict", + options=ip_pool_spec, + required_by={"end_ip": "start_ip"}, + ), + remove_ip_pools=dict(type="list", elements="str"), + gateway=dict(type="str"), + subnet_mask=dict(type="str"), + primary_dns=dict(type="str"), + secondary_dns=dict(type="str"), + dns_domain=dict(type="str"), + ) + return module_args + + +def create_vlan(module, result): + vlan = VLAN(module) + + name = module.params.get("name") + uuid, err = vlan.get_uuid(name) + if uuid: + module.fail_json(msg="vlan with given name already exists", **result) + + spec, err = vlan.get_spec(validate_module_params=True) + if err: + result["error"] = err + module.fail_json(msg="Failed generating create vlan spec", **result) + + if module.check_mode: + result["response"] = spec + return + + resp = vlan.create(data=spec) + + result["response"] = resp + vlan_uuid = resp["id"] + result["vlan_uuid"] = vlan_uuid + result["changed"] = True + + +def update_vlan(module, result): + vlan = VLAN(module) + + uuid = module.params.get("vlan_uuid") + ip_pools = module.params.pop("ip_pools", None) + remove_ip_pools = module.params.pop("remove_ip_pools", None) + if not uuid: + module.fail_json(msg="vlan_uuid is required field for update", **result) + + resp, err = vlan.get_vlan(uuid=uuid, detailed=False) + + old_spec = vlan.get_default_update_spec(override_spec=resp) + + update_spec, err = vlan.get_spec(old_spec=old_spec, validate_module_params=True) + + if err: + result["error"] = err + module.fail_json(msg="Failed generating update vlan spec", **result) + + if module.check_mode: + result["response"] = update_spec + return + + if check_for_idempotency(old_spec, update_spec): + if not remove_ip_pools and not ip_pools: + result["skipped"] = True + module.exit_json(msg="Nothing to change.") + else: + vlan.update(data=update_spec, uuid=uuid) + + if remove_ip_pools: + vlan.remove_ip_pools(vlan_uuid=uuid, ip_pools=remove_ip_pools) + if ip_pools: + resp, err = 
+
+
+def run_module():
+    module = NdbBaseModule(
+        argument_spec=get_module_spec(),
+        mutually_exclusive=[("vlan_uuid", "name")],
+        required_if=[
+            ("state", "present", ("name", "vlan_uuid"), True),
+            ("state", "absent", ("vlan_uuid",)),
+        ],
+        required_by={"remove_ip_pools": "vlan_uuid"},
+        required_one_of=[("vlan_uuid", "vlan_type")],
+        supports_check_mode=True,
+    )
+    remove_param_with_none_value(module.params)
+    result = {"changed": False, "error": None, "response": None, "vlan_uuid": None}
+    if module.params["state"] == "present":
+        if module.params.get("vlan_uuid"):
+            update_vlan(module, result)
+        else:
+            create_vlan(module, result)
+    else:
+        delete_vlan(module, result)
+    module.exit_json(**result)
+
+
+def main():
+    run_module()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/ntnx_ndb_vlans_info.py b/plugins/modules/ntnx_ndb_vlans_info.py
new file mode 100644
index 000000000..8b34f07db
--- /dev/null
+++ b/plugins/modules/ntnx_ndb_vlans_info.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Prem Karat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: ntnx_ndb_vlans_info
+short_description: info module for ndb vlans
+version_added: 1.8.0
+description: 'Get vlan info'
+options:
+  name:
+    description:
+      - vlan name
+    type: str
+  uuid:
+    description:
+      - vlan uuid
+    type: str
+extends_documentation_fragment:
+  - nutanix.ncp.ntnx_ndb_info_base_module
+author:
+  - Prem Karat (@premkarat)
+  - Gevorg Khachatryan (@Gevorg-Khachatryan-97)
+  - Alaa Bishtawi (@alaa-bish)
+"""
+EXAMPLES = r"""
+- name: List all era vlans
+  ntnx_ndb_vlans_info:
+    nutanix_host: ""
+    nutanix_username: ""
+    nutanix_password: ""
+    validate_certs: false
+  register: vlans
+
+- name: get era vlans using its name
+  ntnx_ndb_vlans_info:
+    nutanix_host: ""
+    nutanix_username: ""
+    nutanix_password: ""
+    validate_certs: false
+    name: "test_name"
+  register: result
+
+- name: List vlans using uuid
+  ntnx_ndb_vlans_info:
+    nutanix_host: ""
+    nutanix_username: ""
+    nutanix_password: ""
+    validate_certs: false
+    uuid: ""
+  register: result
+
+"""
+RETURN = r"""
+response:
+  description: listing all vlans
+  returned: always
+  type: list
+  sample: []
+"""
+
+from ..module_utils.ndb.base_info_module import NdbBaseInfoModule  # noqa: E402
+from ..module_utils.ndb.vlans import VLAN  # noqa: E402
+
+
+def get_module_spec():
+
+    module_args = dict(
+        name=dict(type="str"),
+        uuid=dict(type="str"),
+    )
+
+    return module_args
+
+
+def get_vlan(module, result):
+    vlan = VLAN(module)
+    resp, err = vlan.get_vlan(
+        uuid=module.params.get("uuid"), name=module.params.get("name")
+    )
+
+    if err:
+ result["error"] = err + module.fail_json(msg="Failed fetching vlan info", **result) + result["response"] = resp + + +def get_vlans(module, result): + vlan = VLAN(module) + + resp = vlan.read() + + result["response"] = resp + + +def run_module(): + module = NdbBaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[("name", "uuid")], + ) + result = {"changed": False, "error": None, "response": None} + if module.params.get("name") or module.params.get("uuid"): + get_vlan(module, result) + else: + get_vlans(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/tests/integration/targets/ntnx_acps/aliases b/tests/integration/targets/ntnx_acps/aliases index e69de29bb..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_acps/aliases +++ b/tests/integration/targets/ntnx_acps/aliases @@ -0,0 +1 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_acps_info/aliases b/tests/integration/targets/ntnx_acps_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_acps_info/aliases +++ b/tests/integration/targets/ntnx_acps_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_address_groups/aliases b/tests/integration/targets/ntnx_address_groups/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_address_groups/aliases +++ b/tests/integration/targets/ntnx_address_groups/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_address_groups_info/aliases b/tests/integration/targets/ntnx_address_groups_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_address_groups_info/aliases +++ b/tests/integration/targets/ntnx_address_groups_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_categories/aliases b/tests/integration/targets/ntnx_categories/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_categories/aliases +++ b/tests/integration/targets/ntnx_categories/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_categories_info/aliases b/tests/integration/targets/ntnx_categories_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_categories_info/aliases +++ b/tests/integration/targets/ntnx_categories_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_clusters_info/aliases b/tests/integration/targets/ntnx_clusters_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_clusters_info/aliases +++ b/tests/integration/targets/ntnx_clusters_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_hosts_info/aliases b/tests/integration/targets/ntnx_hosts_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_hosts_info/aliases +++ b/tests/integration/targets/ntnx_hosts_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_image_placement_policies_info/aliases b/tests/integration/targets/ntnx_image_placement_policies_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_image_placement_policies_info/aliases +++ b/tests/integration/targets/ntnx_image_placement_policies_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_image_placement_policy/aliases b/tests/integration/targets/ntnx_image_placement_policy/aliases index 
e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_image_placement_policy/aliases +++ b/tests/integration/targets/ntnx_image_placement_policy/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_images/aliases b/tests/integration/targets/ntnx_images/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_images/aliases +++ b/tests/integration/targets/ntnx_images/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_images_info/aliases b/tests/integration/targets/ntnx_images_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_images_info/aliases +++ b/tests/integration/targets/ntnx_images_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_ndb_availability_databases/aliases b/tests/integration/targets/ntnx_ndb_availability_databases/aliases new file mode 100644 index 000000000..139597f9c --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_availability_databases/aliases @@ -0,0 +1,2 @@ + + diff --git a/tests/integration/targets/ntnx_ndb_availability_databases/meta/main.yml b/tests/integration/targets/ntnx_ndb_availability_databases/meta/main.yml new file mode 100644 index 000000000..ea2e9da19 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_availability_databases/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_availability_databases/readme.md b/tests/integration/targets/ntnx_ndb_availability_databases/readme.md new file mode 100644 index 000000000..ae2744681 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_availability_databases/readme.md @@ -0,0 +1,2 @@ +### Modules Tested: +1. HA instances using ntnx_ndb_databases \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_availability_databases/tasks/main.yml b/tests/integration/targets/ntnx_ndb_availability_databases/tasks/main.yml new file mode 100644 index 000000000..d09f77ab1 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_availability_databases/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false + + block: + - import_tasks: "tests.yml" diff --git a/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml new file mode 100644 index 000000000..b5d83345a --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml @@ -0,0 +1,601 @@ +--- +# Summary: +# This playbook will test below cases: +# 1. Create HA instance spec with check mode and minimal spec +# 2. Create HA postgres database instance with multicluster nodes +# 3. 
Create HA postgres database instance with static IP and cluster IP assigments + +- debug: + msg: "start ndb databases test flow for testing high availability databases" + +- name: Generate random name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + +- set_fact: + db1_name: "{{random_name[0]}}-ha" + db1_name_updated: "{{random_name[0]}}-updated" + cluster1_name: "{{random_name[0]}}-cluster" + +- name: create HA instance postgres database spec using check mode + check_mode: yes + ntnx_ndb_databases: + wait: true + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_params_profile: + name: "{{postgres_ha_profiles.db_params_profile.name}}" + + db_server_cluster: + new_cluster: + name: "{{cluster1_name}}" + cluster: + name: "{{cluster.cluster1.name}}" + software_profile: + name: "{{ postgres_ha_profiles.software_profile.name }}" + network_profile: + name: "{{ postgres_ha_profiles.static_network_profile.name }}" + compute_profile: + name: "{{ postgres_ha_profiles.compute_profile.name }}" + password: "temp_password" + pub_ssh_key: "test_key" + vms: + - name: "{{cluster1_name}}-vm-1" + node_type: "database" + role: "Primary" + archive_log_destination: "/temp/" + - name: "{{cluster1_name}}-vm-2" + node_type: "database" + role: "Secondary" + archive_log_destination: "/temp/" + - name: "{{cluster1_name}}-vm-3" + cluster: + name: "{{cluster.cluster2.name}}" + node_type: "database" + role: "Secondary" + archive_log_destination: "/temp/" + compute_profile: + uuid: "test_compute_uuid" + - name: "{{cluster1_name}}-ha-vm" + cluster: + name: "{{cluster.cluster2.name}}" + node_type: "haproxy" + + + postgres: + type: "ha" + db_name: testAnsible + db_password: "{{ vm_password }}" + db_size: 200 + listener_port: "9999" + patroni_cluster_name: "patroni_cluster" + enable_synchronous_mode: true + archive_wal_expire_days: 3 + post_create_script: "ls" + pre_create_script: "ls -a" + enable_peer_auth: true + ha_proxy: + provision_virtual_ip: true + write_port: "9999" + read_port: "8888" + + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + clusters: + - name: "{{cluster.cluster1.name}}" + - uuid: "test_uuid" + + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + + register: result + +- set_fact: + expected_response: { + "actionArguments": [ + { + "name": "cluster_name", + "value": "{{cluster1_name}}" + }, + { + "name": "listener_port", + "value": "9999" + }, + { + "name": "allocate_pg_hugepage", + "value": false + }, + { + "name": "cluster_database", + "value": false + }, + { + "name": "db_password", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + }, + { + "name": "pre_create_script", + "value": "ls -a" + }, + { + "name": "post_create_script", + "value": "ls" + }, + { + "name": "patroni_cluster_name", + "value": "patroni_cluster" + }, + { + "name": "archive_wal_expire_days", + "value": "3" + }, + { + "name": "enable_synchronous_mode", + "value": true + }, + { + "name": "enable_peer_auth", + "value": true + }, + { + "name": "node_type", + "value": "database" + }, + { + "name": "backup_policy", + "value": "primary_only" + }, + { + "name": "failover_mode", + "value": "Automatic" + }, + { + "name": "database_names", + "value": "testAnsible" + }, + { + "name": "database_size", + "value": "200" + }, + { + "name": "provision_virtual_ip", 
+ "value": true + }, + { + "name": "proxy_write_port", + "value": "9999" + }, + { + "name": "proxy_read_port", + "value": "8888" + }, + { + "name": "deploy_haproxy", + "value": true + } + ], + "autoTuneStagingDrive": true, + "clustered": true, + "computeProfileId": "{{postgres_ha_profiles.compute_profile.uuid}}", + "createDbserver": true, + "databaseDescription": "ansible-created-db-desc", + "databaseType": "postgres_database", + "dbParameterProfileId": "{{postgres_ha_profiles.db_params_profile.uuid}}", + "maintenanceTasks": { + "maintenanceWindowId": "{{ maintenance.window_uuid }}", + "tasks": [ + { + "payload": { + "prePostCommand": { + "postCommand": "ls -a", + "preCommand": "ls" + } + }, + "taskType": "OS_PATCHING" + }, + { + "payload": { + "prePostCommand": { + "postCommand": "ls -F", + "preCommand": "ls -l" + } + }, + "taskType": "DB_PATCHING" + } + ] + }, + "name": "{{db1_name}}", + "networkProfileId": "{{postgres_ha_profiles.static_network_profile.uuid}}", + "nodeCount": 4, + "nodes": [ + { + "computeProfileId": "{{postgres_ha_profiles.compute_profile.uuid}}", + "networkProfileId": "{{postgres_ha_profiles.static_network_profile.uuid}}", + "nxClusterId": "{{cluster.cluster1.uuid}}", + "properties": [ + { + "name": "role", + "value": "Primary" + }, + { + "name": "node_type", + "value": "database" + }, + { + "name": "remote_archive_destination", + "value": "/temp/" + } + ], + "vmName": "{{cluster1_name}}-vm-1" + }, + { + "computeProfileId": "{{postgres_ha_profiles.compute_profile.uuid}}", + "networkProfileId": "{{postgres_ha_profiles.static_network_profile.uuid}}", + "nxClusterId": "{{cluster.cluster1.uuid}}", + "properties": [ + { + "name": "role", + "value": "Secondary" + }, + { + "name": "node_type", + "value": "database" + }, + { + "name": "remote_archive_destination", + "value": "/temp/" + } + ], + "vmName": "{{cluster1_name}}-vm-2" + }, + { + "computeProfileId": "test_compute_uuid", + "networkProfileId": "{{postgres_ha_profiles.static_network_profile.uuid}}", + "nxClusterId": "{{cluster.cluster2.uuid}}", + "properties": [ + { + "name": "role", + "value": "Secondary" + }, + { + "name": "node_type", + "value": "database" + }, + { + "name": "remote_archive_destination", + "value": "/temp/" + } + ], + "vmName": "{{cluster1_name}}-vm-3" + }, + { + "computeProfileId": "{{postgres_ha_profiles.compute_profile.uuid}}", + "networkProfileId": "{{postgres_ha_profiles.static_network_profile.uuid}}", + "nxClusterId": "{{cluster.cluster2.uuid}}", + "properties": [ + { + "name": "node_type", + "value": "haproxy" + } + ], + "vmName": "{{cluster1_name}}-ha-vm" + } + ], + "nxClusterId": "{{cluster.cluster1.uuid}}", + "softwareProfileId": "{{postgres_ha_profiles.software_profile.uuid}}", + "softwareProfileVersionId": "{{postgres_ha_profiles.software_profile.latest_version_id}}", + "sshPublicKey": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "tags": [], + "timeMachineInfo": { + "autoTuneLogDrive": true, + "description": "TM-desc", + "name": "TM1", + "schedule": {}, + "slaDetails": { + "primarySla": { + "nxClusterIds": [ + "{{cluster.cluster1.uuid}}", + "test_uuid" + ], + "slaId": "{{sla.uuid}}" + } + } + }, + "vmPassword": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + } + +- name: Check mode status + assert: + that: + - result.response is defined + - result.changed == False + - result.response == expected_response + + fail_msg: "Unable to create ha instance database create spec" + success_msg: "HA instance postgres database spec created successfully" + +- name: create HA instance postgres database with static 
IP assignments + ntnx_ndb_databases: + wait: true + timeout: 5400 + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_params_profile: + name: "{{postgres_ha_profiles.db_params_profile.name}}" + + db_server_cluster: + new_cluster: + name: "{{cluster1_name}}" + cluster: + name: "{{cluster.cluster1.name}}" + ips: + - cluster: + name: "{{cluster.cluster1.name}}" + ip: "{{cluster_ips.vip}}" + + software_profile: + name: "{{ postgres_ha_profiles.software_profile.name }}" + network_profile: + name: "{{ postgres_ha_profiles.static_network_profile.name }}" + compute_profile: + name: "{{ postgres_ha_profiles.compute_profile.name }}" + password: "{{vm_password}}" + pub_ssh_key: "{{public_ssh_key}}" + vms: + + - name: "{{cluster1_name}}-vm-1" + node_type: "database" + role: "Primary" + ip: "{{cluster_ips.ip1}}" + + - name: "{{cluster1_name}}-vm-2" + node_type: "database" + role: "Secondary" + ip: "{{cluster_ips.ip2}}" + + - name: "{{cluster1_name}}-vm-3" + node_type: "database" + role: "Secondary" + ip: "{{cluster_ips.ip3}}" + + - name: "{{cluster1_name}}-vm-ha-proxy1" + node_type: "haproxy" + ip: "{{cluster_ips.ip4}}" + + - name: "{{cluster1_name}}-vm-ha-proxy2" + node_type: "haproxy" + ip: "{{cluster_ips.ip5}}" + + postgres: + type: "ha" + db_name: testAnsible + db_password: "{{ vm_password }}" + db_size: 200 + patroni_cluster_name: "patroni_cluster" + ha_proxy: + provision_virtual_ip: true + + time_machine: + name: TM2 + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + clusters: + - name: "{{cluster.cluster1.name}}" + tags: + ansible-databases: "ha-instance-dbs" + + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + + register: result + +- name: create properties map + set_fact: + properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" + loop: "{{result.response.properties}}" + no_log: true + + +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status == 'READY' + - result.db_uuid is defined + - result.changed == true + - result.response.name == db1_name + - result.response.description == "ansible-created-db-desc" + - properties["db_parameter_profile_id"] == postgres_ha_profiles.db_params_profile.uuid + - properties["listener_port"] == "5432" + - properties["cluster_ip"] == "['{{cluster_ips.vip}}']" + - result.response.databaseNodes | length == 3 + - result.response.databaseNodes[0].status == "READY" + - result.response.databaseNodes[1].status == "READY" + - result.response.databaseNodes[2].status == "READY" + - result.response.type == "postgres_database" + - result.response.tags | length == 1 + - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].value == "ha-instance-dbs" + + fail_msg: "Unable to provision postgres HA database instance" + success_msg: "postgres HA database instance provision successfully" + + +- name: unregister db along with delete time machine and db server vms + ntnx_ndb_databases: + db_uuid: "{{result.db_uuid}}" + state: "absent" + wait: true + delete_time_machine: true + delete_db_server_vms: True + register: result + +- name: verify status of delete of database along with time machine delete + assert: + that: + - result.changed == True + - 
result.failed == false + - result.response.status == "5" + - result.response.db_server_vms_delete_status.status == "5" + fail_msg: "database delete failed" + success_msg: "database deleted successfully" + + +- name: create HA instance postgres database with multicluster vms + ntnx_ndb_databases: + timeout: 5400 + wait: true + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_params_profile: + name: "{{postgres_ha_profiles.db_params_profile.name}}" + + db_server_cluster: + new_cluster: + name: "{{cluster1_name}}" + cluster: + name: "{{cluster.cluster1.name}}" + software_profile: + name: "{{ postgres_ha_profiles.software_profile.name }}" + network_profile: + name: "{{ postgres_ha_profiles.multicluster_network_profile.name }}" + compute_profile: + name: "{{ postgres_ha_profiles.compute_profile.name }}" + password: "{{vm_password}}" + pub_ssh_key: "{{public_ssh_key}}" + vms: + + - name: "{{cluster1_name}}-vm-1" + node_type: "database" + role: "Primary" + + - name: "{{cluster1_name}}-vm-2" + node_type: "database" + role: "Secondary" + + - name: "{{cluster1_name}}-vm-3" + cluster: + name: "{{cluster.cluster2.name}}" + node_type: "database" + role: "Secondary" + + postgres: + type: "ha" + db_name: testAnsible + db_password: "{{ vm_password }}" + db_size: 200 + patroni_cluster_name: "patroni_cluster" + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + clusters: + - name: "{{cluster.cluster1.name}}" + - uuid: "{{cluster.cluster2.uuid}}" + tags: + ansible-databases: "ha-instance-dbs" + + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + + register: result + + +- name: create properties map + set_fact: + properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" + loop: "{{result.response.properties}}" + no_log: true + + +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status == 'READY' + - result.db_uuid is defined + - result.changed == true + - result.response.name == db1_name + - result.response.description == "ansible-created-db-desc" + - properties["archive_wal_expire_days"] == "-1" + - properties["db_parameter_profile_id"] == postgres_ha_profiles.db_params_profile.uuid + - properties["listener_port"] == "5432" + - properties["vm_ip"] is defined + - result.response.databaseNodes | length == 3 + - result.response.databaseNodes[0].status == "READY" + - result.response.databaseNodes[1].status == "READY" + - result.response.databaseNodes[2].status == "READY" + - result.response.type == "postgres_database" + - result.response.tags | length == 1 + - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].value == "ha-instance-dbs" + + fail_msg: "Unable to provision postgres HA database instance" + success_msg: "postgres HA database instance provision successfully" + +- name: unregister db along with delete time machine and db server vms + ntnx_ndb_databases: + db_uuid: "{{result.db_uuid}}" + state: "absent" + wait: true + delete_time_machine: true + delete_db_server_vms: True + register: result + +- name: verify status of delete of database along with time machine delete + assert: + that: + - result.changed == True + - result.failed == false + - 
result.response.status == "5" + - result.response.db_server_vms_delete_status.status == "5" + fail_msg: "database delete failed" + success_msg: "database deleted successfully" diff --git a/tests/integration/targets/ntnx_ndb_clones_info/aliases b/tests/integration/targets/ntnx_ndb_clones_info/aliases new file mode 100644 index 000000000..b29a52bdb --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_clones_info/aliases @@ -0,0 +1,2 @@ + +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_clones_info/meta/main.yml b/tests/integration/targets/ntnx_ndb_clones_info/meta/main.yml index 23b0fb268..6397436fc 100644 --- a/tests/integration/targets/ntnx_ndb_clones_info/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_clones_info/meta/main.yml @@ -1,2 +1,2 @@ dependencies: - - prepare_env \ No newline at end of file + - prepare_ndb_env \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_databases_and_info/aliases b/tests/integration/targets/ntnx_ndb_clusters/aliases similarity index 100% rename from tests/integration/targets/ntnx_ndb_databases_and_info/aliases rename to tests/integration/targets/ntnx_ndb_clusters/aliases diff --git a/tests/integration/targets/ntnx_ndb_clusters/meta/main.yml b/tests/integration/targets/ntnx_ndb_clusters/meta/main.yml new file mode 100644 index 000000000..6397436fc --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_clusters/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml b/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml new file mode 100644 index 000000000..405e547c7 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_clusters/tasks/CRUD.yml @@ -0,0 +1,343 @@ +--- +- debug: + msg: Start testing ntnx_ndb_clusters + + +- name: Register cluster with prism_vlan in check mode + ntnx_ndb_clusters: + name: "{{cluster.cluster3.name}}" + desc: "{{cluster.cluster3.desc}}" + name_prefix: "{{cluster.cluster3.name_prefix}}" + cluster_ip: "{{cluster.cluster3.cluster_ip}}" + cluster_credentials: + username: "{{cluster.cluster3.cluster_credentials.username}}" + password: "{{cluster.cluster3.cluster_credentials.password}}" + agent_network: + dns_servers: + - "{{cluster.cluster3.agent_network.dns_servers[0]}}" + - "{{cluster.cluster3.agent_network.dns_servers[1]}}" + ntp_servers: + - "{{cluster.cluster3.agent_network.ntp_servers[0]}}" + - "{{cluster.cluster3.agent_network.ntp_servers[1]}}" + - "{{cluster.cluster3.agent_network.ntp_servers[2]}}" + - "{{cluster.cluster3.agent_network.ntp_servers[3]}}" + vlan_access: + prism_vlan: + vlan_name: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_name}}" + vlan_type: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}" + static_ip: "{{cluster.cluster3.vlan_access.prism_vlan.static_ip}}" + gateway: "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}" + subnet_mask: "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}" + storage_container: "{{cluster.cluster3.storage_container}}" + register: result + ignore_errors: true + check_mode: true + + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.clusterName == "{{cluster.cluster3.name}}" + - result.response.clusterDescription == "{{cluster.cluster3.desc}}" + - result.response.agentVMPrefix == "{{cluster.cluster3.name_prefix}}" + - result.response.clusterIP == "{{cluster.cluster3.cluster_ip}}" + 
- result.response.credentialsInfo[0].value == "{{cluster.cluster3.cluster_credentials.username}}"
+      - result.response.networksInfo[0].networkInfo[0].value == "{{cluster.cluster3.vlan_access.prism_vlan.vlan_name}}"
+      - result.response.networksInfo[0].networkInfo[1].value == "{{cluster.cluster3.vlan_access.prism_vlan.static_ip}}"
+      - result.response.networksInfo[0].networkInfo[2].value == "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}"
+      - result.response.networksInfo[0].networkInfo[3].value == "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}"
+      - result.response.networksInfo[0].type == "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}"
+    fail_msg: "fail: wrong spec returned in check mode for registering cluster"
+    success_msg: "pass: returned as expected"
+
+- name: Register cluster with prism_vlan
+  ntnx_ndb_clusters:
+    wait: true
+    name: "{{cluster.cluster3.name}}"
+    desc: "{{cluster.cluster3.desc}}"
+    name_prefix: "{{cluster.cluster3.name_prefix}}"
+    cluster_ip: "{{cluster.cluster3.cluster_ip}}"
+    cluster_credentials:
+      username: "{{cluster.cluster3.cluster_credentials.username}}"
+      password: "{{cluster.cluster3.cluster_credentials.password}}"
+    agent_network:
+      dns_servers:
+        - "{{cluster.cluster3.agent_network.dns_servers[0]}}"
+        - "{{cluster.cluster3.agent_network.dns_servers[1]}}"
+      ntp_servers:
+        - "{{cluster.cluster3.agent_network.ntp_servers[0]}}"
+        - "{{cluster.cluster3.agent_network.ntp_servers[1]}}"
+        - "{{cluster.cluster3.agent_network.ntp_servers[2]}}"
+        - "{{cluster.cluster3.agent_network.ntp_servers[3]}}"
+    vlan_access:
+      prism_vlan:
+        vlan_name: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_name}}"
+        vlan_type: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}"
+        static_ip: "{{cluster.cluster3.vlan_access.prism_vlan.static_ip}}"
+        gateway: "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}"
+        subnet_mask: "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}"
+    storage_container: "{{cluster.cluster3.storage_container}}"
+  register: result
+  ignore_errors: true
+  no_log: true
+
+- name: check listing status
+  assert:
+    that:
+      - result.response is defined
+      - result.failed == false
+      - result.changed == true
+      - result.response.name == "{{cluster.cluster3.name}}"
+      - result.response.description == "{{cluster.cluster3.desc}}"
+      - result.response.ipAddresses[0] == "{{cluster.cluster3.cluster_ip}}"
+    fail_msg: "fail: Unable to register cluster with prism_vlan"
+    success_msg: "pass: Register cluster with prism_vlan finished successfully"
+
+################################################################
+
+- name: update cluster name and desc
+  ntnx_ndb_clusters:
+    uuid: "{{result.cluster_uuid}}"
+    name: newname
+    desc: newdesc
+  register: result
+  ignore_errors: true
+  no_log: true
+
+- name: check listing status
+  assert:
+    that:
+      - result.response.name == "newname"
+      - result.cluster_uuid is defined
+      - result.response.description == "newdesc"
+    fail_msg: "fail: Unable to update cluster name and desc"
+    success_msg: "pass: update of cluster name and desc finished successfully"
+- set_fact:
+    todelete: "{{result.cluster_uuid}}"
+################################################################
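
The check-mode tasks in this file all rely on the same pattern the NDB modules in this diff follow: build the request spec, and in check mode return it without calling the API, so the test can assert on the generated payload. A minimal sketch of that control flow, with C(client.create) standing in as a hypothetical API call:

```python
def run_or_preview(module, client, spec, result):
    """In check mode, expose the generated payload; otherwise call the API."""
    if module.check_mode:
        result["response"] = spec  # dry run: assertions run against this spec
        return result
    result["response"] = client.create(data=spec)  # hypothetical client call
    result["changed"] = True
    return result
```
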
+- name: update cluster credential in check_mode
+  ntnx_ndb_clusters:
+    uuid: "{{result.cluster_uuid}}"
+    cluster_credentials:
+      username: test
+      password: test
+  register: result
+  ignore_errors: true
+  no_log: true
+  check_mode: true
+
+- name: check listing status
+  assert:
+    that:
+      - result.response is defined
+      - result.failed == false
+      - result.changed == false
+      - result.response.username is defined
+      - result.response.password is defined
+      - result.cluster_uuid is defined
+    fail_msg: "fail: wrong spec returned when updating cluster credential in check_mode"
+    success_msg: "pass: returned as expected"
+################################################################
+
+- name: Negative scenario - update storage container
+  ntnx_ndb_clusters:
+    uuid: "{{result.cluster_uuid}}"
+    storage_container: "{{cluster.cluster3.storage_container}}"
+  register: out
+  ignore_errors: true
+  no_log: true
+
+- name: check listing status
+  assert:
+    that:
+      - out.changed == false
+      - out.failed == true
+      - out.msg == "parameters are mutually exclusive: uuid|storage_container"
+    fail_msg: "Fail: storage_container updated"
+    success_msg: "Success: returned error as expected"
+
+################################################################
+
+- name: Negative scenario - update vlan access
+  ntnx_ndb_clusters:
+    uuid: "{{result.cluster_uuid}}"
+    vlan_access:
+      prism_vlan:
+        vlan_name: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_name}}"
+        vlan_type: "{{cluster.cluster3.vlan_access.prism_vlan.vlan_type}}"
+        static_ip: "{{cluster.cluster3.vlan_access.prism_vlan.static_ip}}"
+        gateway: "{{cluster.cluster3.vlan_access.prism_vlan.gateway}}"
+        subnet_mask: "{{cluster.cluster3.vlan_access.prism_vlan.subnet_mask}}"
+  register: out
+  ignore_errors: true
+  no_log: true
+
+- name: check listing status
+  assert:
+    that:
+      - out.changed == false
+      - out.failed == true
+      - out.msg == "parameters are mutually exclusive: uuid|vlan_access"
+    fail_msg: "Fail: vlan_access updated"
+    success_msg: "Success: returned error as expected"
+
+################################################################
+
+- name: Negative scenario - update agent network
+  ntnx_ndb_clusters:
+    uuid: "{{result.cluster_uuid}}"
+    agent_network:
+      dns_servers:
+        - "{{cluster.cluster3.agent_network.dns_servers[0]}}"
+        - "{{cluster.cluster3.agent_network.dns_servers[1]}}"
+      ntp_servers:
+        - "{{cluster.cluster3.agent_network.ntp_servers[0]}}"
+        - "{{cluster.cluster3.agent_network.ntp_servers[1]}}"
+        - "{{cluster.cluster3.agent_network.ntp_servers[2]}}"
+        - "{{cluster.cluster3.agent_network.ntp_servers[3]}}"
+  register: out
+  ignore_errors: true
+  no_log: true
+
+- name: check listing status
+  assert:
+    that:
+      - out.changed == false
+      - out.failed == true
+      - out.msg == "parameters are mutually exclusive: uuid|agent_network"
+    fail_msg: "Fail: agent_network updated"
+    success_msg: "Success: returned error as expected"
+
+################################################################
+
+- name: Negative scenario - update name prefix
+  ntnx_ndb_clusters:
+    uuid: "{{result.cluster_uuid}}"
+    name_prefix: "{{cluster.cluster3.name_prefix}}"
+  register: out
+  ignore_errors: true
+  no_log: true
+
+- name: check listing status
+  assert:
+    that:
+      - out.changed == false
+      - out.failed == true
+      - out.msg == "parameters are mutually exclusive: uuid|name_prefix"
+    fail_msg: "Fail: name_prefix updated"
+    success_msg: "Success: returned error as expected"
+
+################################################################
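
The four negative scenarios above all exercise the same guard: update-only calls cannot carry registration-time options. In AnsibleModule terms this is declared with C(mutually_exclusive) pairs; a minimal sketch of such a declaration, with the pairs inferred from the error messages asserted above rather than copied from the module source:

```python
from ansible.module_utils.basic import AnsibleModule

# Sketch only: pairs inferred from the "parameters are mutually exclusive: uuid|..."
# messages in these asserts; the real module may declare them differently.
module = AnsibleModule(
    argument_spec=dict(
        uuid=dict(type="str"),
        name=dict(type="str"),
        name_prefix=dict(type="str"),
        storage_container=dict(type="str"),
        agent_network=dict(type="dict"),
        vlan_access=dict(type="dict"),
    ),
    mutually_exclusive=[
        ("uuid", "storage_container"),
        ("uuid", "vlan_access"),
        ("uuid", "agent_network"),
        ("uuid", "name_prefix"),
    ],
)
```
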
+
+- name: List all NDB clusters
+  ntnx_ndb_clusters_info:
+  register: clusters
+  no_log: true
+
+- name: check listing status
+  assert:
+    that:
+      - clusters.response is defined
+      - clusters.failed == false
+      - clusters.changed == false
+      - clusters.response | length > 0
+    fail_msg: "Unable to list all NDB clusters"
+    success_msg: "NDB clusters listed successfully"
+
+################################################################
+- name: get ndb cluster with count_entities
+  ntnx_ndb_clusters_info:
+    filters:
+      count_entities: true
+  register: result
+  ignore_errors: true
+  no_log: true
+
+- name: check listing status
+  assert:
+    that:
+      - result.response is defined
+      - result.failed == false
+      - result.changed == false
+      - result.response[0].entityCounts is defined
+    fail_msg: "Unable to get NDB clusters with count_entities"
+    success_msg: "got NDB clusters with count_entities successfully"
+################################################################
+- name: get NDB clusters using its name
+  ntnx_ndb_clusters_info:
+    name: "{{clusters.response[0].name}}"
+  register: result
+  no_log: true
+
+- name: check listing status
+  assert:
+    that:
+      - result.response is defined
+      - result.failed == false
+      - result.changed == false
+      - result.response.name == "{{clusters.response[0].name}}"
+    fail_msg: "Unable to get NDB clusters using its name"
+    success_msg: "got NDB clusters using its name successfully"
+
+################################################################
+
+- name: List clusters using uuid
+  ntnx_ndb_clusters_info:
+    uuid: "{{clusters.response[0].id}}"
+  register: result
+  no_log: true
+
+- name: check listing status
+  assert:
+    that:
+      - result.response is defined
+      - result.failed == false
+      - result.changed == false
+      - result.response.name == "{{clusters.response[0].name}}"
+    fail_msg: "Unable to get NDB clusters using uuid"
+    success_msg: "got NDB clusters using uuid successfully"
+
+################################################################
+
+- name: get NDB clusters with incorrect name
+  ntnx_ndb_clusters_info:
+    name: "abcd"
+  register: result
+  ignore_errors: True
+  no_log: true
+
+- name: check listing status
+  assert:
+    that:
+      - result.error is defined
+      - result.failed == true
+      - result.changed == false
+    fail_msg: "module didn't error out correctly when incorrect name is given"
+    success_msg: "module errored out correctly when incorrect name is given"
+
+################################################################
+
+- name: delete cluster
+  ntnx_ndb_clusters:
+    uuid: "{{todelete}}"
+    state: absent
+  register: result
+  ignore_errors: true
+  no_log: true
+
+- name: assert when status not complete
+  assert:
+    that:
+      - result.response is defined
+      - result.changed == true
+      - result.failed == false
+      - result.response.status == "5"
+    fail_msg: "Unable to delete cluster"
+    success_msg: "cluster deleted successfully"
+
+
+- set_fact:
+    todelete: []
\ No newline at end of file
diff --git a/tests/integration/targets/ntnx_ndb_db_servers_info/tasks/main.yml b/tests/integration/targets/ntnx_ndb_clusters/tasks/main.yml
similarity index 86%
rename from tests/integration/targets/ntnx_ndb_db_servers_info/tasks/main.yml
rename to tests/integration/targets/ntnx_ndb_clusters/tasks/main.yml
index da502fcc5..cbd87d175 100644
--- a/tests/integration/targets/ntnx_ndb_db_servers_info/tasks/main.yml
+++ b/tests/integration/targets/ntnx_ndb_clusters/tasks/main.yml
@@ -6,4 +6,4 @@
     nutanix_password: "{{ndb_password}}"
     validate_certs: false
   block:
-    - import_tasks: "info.yml"
+    - import_tasks: "CRUD.yml"
diff --git a/tests/integration/targets/ntnx_ndb_clusters_info/meta/main.yml b/tests/integration/targets/ntnx_ndb_clusters_info/meta/main.yml
deleted file mode 100644
index 23b0fb268..000000000
--- a/tests/integration/targets/ntnx_ndb_clusters_info/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - 
prepare_env \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_clusters_info/tasks/info.yml b/tests/integration/targets/ntnx_ndb_clusters_info/tasks/info.yml deleted file mode 100644 index e19713ba0..000000000 --- a/tests/integration/targets/ntnx_ndb_clusters_info/tasks/info.yml +++ /dev/null @@ -1,65 +0,0 @@ ---- -- debug: - msg: Start testing ntnx_ndb_clusters_info - -- name: List all era clusters - ntnx_ndb_clusters_info: - register: clusters - -- name: check listing status - assert: - that: - - clusters.response is defined - - clusters.failed == false - - clusters.changed == false - - clusters.response | length > 0 - fail_msg: "Unable to list all era clusters" - success_msg: "era clusters listed successfully" -################################################################ -- name: get era clusters using it's name - ntnx_ndb_clusters_info: - name: "{{clusters.response[0].name}}" - register: result - -- name: check listing status - assert: - that: - - result.response is defined - - result.failed == false - - result.changed == false - - result.response.name == "{{clusters.response[0].name}}" - fail_msg: "Unable to get era clusters using it's name " - success_msg: "get era clusters using it's name successfully" -################################################################ -- name: List clusters use id - ntnx_ndb_clusters_info: - uuid: "{{clusters.response[0].id}}" - register: result - -- name: check listing status - assert: - that: - - result.response is defined - - result.failed == false - - result.changed == false - - result.response.name == "{{clusters.response[0].name}}" - fail_msg: "Unable to get era clusters using it's id " - success_msg: "get era clusters using it's id successfully" -################################################################ - - -- name: get era clusters with incorrect name - ntnx_ndb_clusters_info: - name: "abcd" - register: result - ignore_errors: True - no_log: true - -- name: check listing status - assert: - that: - - result.error is defined - - result.failed == true - - result.changed == false - fail_msg: "module didn't errored out correctly when incorrect name is given" - success_msg: "module errored out correctly when incorrect name is given" \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_database_clones/aliases b/tests/integration/targets/ntnx_ndb_database_clones/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_ndb_database_clones/meta/main.yml b/tests/integration/targets/ntnx_ndb_database_clones/meta/main.yml new file mode 100644 index 000000000..ea2e9da19 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_database_clones/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml b/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml new file mode 100644 index 000000000..31ceb27bc --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml @@ -0,0 +1,789 @@ +--- +# Summary: +# This playbook will test below cases: +# 1. Creation of database clone on new db server vm +# 2. Updates and delete on database clone +# 3. Refresh action on database clone +# 4. Authorization and deauthorization of db server vm wrt to time machines +# 5. 
Creation of clone on authorized db server vm + +- debug: + msg: "start ndb database clone tests" + +- name: Generate random name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + +- set_fact: + db1_name: "{{random_name[0]}}" + clone_db1: "{{random_name[0]}}-clone" + vm1_name: "{{random_name[0]}}-vm" + tm1: "{{random_name[0]}}-time-machine" + snapshot_name: "{{random_name[0]}}-snapshot" + +############################################ setup db and its snapshot for clone tests ########################################### + + +- name: create single instance postgres database on new db server vm + ntnx_ndb_databases: + wait: true + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_params_profile: + name: "{{db_params_profile.name}}" + + db_vm: + create_new_server: + name: "{{ vm1_name }}-db" + desc: "vm for db server" + password: "{{ vm_password }}" + cluster: + name: "{{cluster.cluster1.name}}" + software_profile: + name: "{{ software_profile.name }}" + network_profile: + name: "{{ network_profile.name }}" + compute_profile: + name: "{{ compute_profile.name }}" + pub_ssh_key: "{{ public_ssh_key }}" + + postgres: + listener_port: "5432" + db_name: testAnsible + db_password: "{{ vm_password }}" + db_size: 200 + type: "single" + + time_machine: + name: "{{tm1}}" + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + + register: result + +- set_fact: + db_uuid: "{{result.db_uuid}}" + +- set_fact: + time_machine_uuid: "{{result.response.timeMachineId}}" + +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status == 'READY' + - result.db_uuid is defined + - result.changed == true + + fail_msg: "Unable to create single instance postgres database" + success_msg: "single instance postgres database created successfully" + +- set_fact: + db_server_uuid: "{{result.response.databaseNodes[0].dbserverId}}" + +- name: create manual snapshot of database + ntnx_ndb_database_snapshots: + time_machine_uuid: "{{time_machine_uuid}}" + name: "{{snapshot_name}}" + register: result + +- name: snapshot status + assert: + that: + - result.response is defined + - result.changed == True + - result.snapshot_uuid is defined + - result.response.status == "ACTIVE" + + fail_msg: "unable to create snapshot of database" + success_msg: "snapshot for clone tests created successfully" + +- set_fact: + snapshot_uuid: "{{result.snapshot_uuid}}" + +############################################ create clone on new db server vm tests ########################################### + + +- name: create spec for clone of database created above on new db server vm + check_mode: yes + ntnx_ndb_database_clones: + name: "{{clone_db1}}" + desc: "ansible-created-clone" + + db_params_profile: + name: "{{db_params_profile.name}}" + + db_vm: + create_new_server: + name: "{{ vm1_name }}" + desc: "vm for db server" + password: "{{ vm_password }}" + cluster: + name: "{{cluster.cluster1.name}}" + network_profile: + name: "{{ network_profile.name }}" + compute_profile: + name: "{{ compute_profile.name }}" + pub_ssh_key: "{{ public_ssh_key }}" + + postgres: + db_password: "{{vm_password}}" + pre_clone_cmd: "ls" + post_clone_cmd: "ls -a" + + time_machine: + name: "{{tm1}}" + pitr_timestamp: "2023-02-04 07:29:36" + timezone: "UTC" + + removal_schedule: + days: 2 + timezone: "Asia/Calcutta" + remind_before_in_days: 1 + 
delete_database: True + + refresh_schedule: + days: 2 + time: "12:00:00" + timezone: "Asia/Calcutta" + + tags: + ansible-clones: ansible-test-db-clones + register: result + + + +- set_fact: + expected_response: { + "actionArguments": [ + { + "name": "db_password", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + }, + { + "name": "pre_clone_cmd", + "value": "ls" + }, + { + "name": "post_clone_cmd", + "value": "ls -a" + }, + { + "name": "dbserver_description", + "value": "vm for db server" + } + ], + "clustered": false, + "computeProfileId": "{{compute_profile.uuid}}", + "createDbserver": true, + "databaseParameterProfileId": "{{db_params_profile.uuid}}", + "description": "ansible-created-clone", + "latestSnapshot": false, + "lcmConfig": { + "databaseLCMConfig": { + "expiryDetails": { + "deleteDatabase": true, + "expireInDays": 2, + "expiryDateTimezone": "Asia/Calcutta", + "remindBeforeInDays": 1 + }, + "refreshDetails": { + "refreshDateTimezone": "Asia/Calcutta", + "refreshInDays": 2, + "refreshTime": "12:00:00" + } + } + }, + "name": "{{clone_db1}}", + "networkProfileId": "{{network_profile.uuid}}", + "nodeCount": 1, + "nodes": [ + { + "computeProfileId": "{{compute_profile.uuid}}", + "networkProfileId": "{{network_profile.uuid}}", + "nxClusterId": "{{cluster.cluster1.uuid}}", + "properties": [], + "vmName": "{{vm1_name}}" + } + ], + "nxClusterId": "{{cluster.cluster1.uuid}}", + "snapshotId": null, + "sshPublicKey": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "tags": [ + { + "tagId": "{{tags.clones.uuid}}", + "tagName": "ansible-clones", + "value": "ansible-test-db-clones" + } + ], + "timeMachineId": "{{time_machine_uuid}}", + "timeZone": "UTC", + "userPitrTimestamp": "2023-02-04 07:29:36", + "vmPassword": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + } + +- name: Check mode status + assert: + that: + - result.response is defined + - result.changed == False + - result.response == expected_response + + fail_msg: "Unable to create DB clone provision spec" + success_msg: "DB clone create spec generated successfully using check_mode" + +- name: create clone using snapshot of previously created database + ntnx_ndb_database_clones: + name: "{{clone_db1}}" + desc: "ansible-created-clone" + + db_params_profile: + name: "{{db_params_profile.name}}" + + db_vm: + create_new_server: + name: "{{ vm1_name }}" + desc: "vm for db server" + password: "{{ vm_password }}" + cluster: + name: "{{cluster.cluster1.name}}" + network_profile: + name: "{{ network_profile.name }}" + compute_profile: + name: "{{ compute_profile.name }}" + pub_ssh_key: "{{ public_ssh_key }}" + + postgres: + db_password: "{{vm_password}}" + + time_machine: + name: "{{tm1}}" + snapshot_uuid: "{{snapshot_uuid}}" + + removal_schedule: + days: 2 + timezone: "Asia/Calcutta" + remind_before_in_days: 1 + delete_database: True + + refresh_schedule: + days: 2 + time: "12:00:00" + timezone: "Asia/Calcutta" + + tags: + ansible-clones: ansible-test-db-clones + register: result + +- name: create properties map + set_fact: + properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" + loop: "{{result.response.properties}}" + no_log: true + +- name: Clone create status + assert: + that: + - result.response is defined + - result.changed == True + - result.uuid is defined + - result.uuid == result.response.id + - result.response.status == "READY" + - result.response.name == clone_db1 + - result.response.description == "ansible-created-clone" + - result.response.clone == True + - 
result.response.lcmConfig.expiryDetails.expireInDays == 2
+      - result.response.lcmConfig.expiryDetails.remindBeforeInDays == 1
+      - result.response.lcmConfig.expiryDetails.deleteDatabase == True
+      - result.response.lcmConfig.expiryDetails.expiryDateTimezone == "Asia/Calcutta"
+      - result.response.lcmConfig.refreshDetails.refreshDateTimezone == "Asia/Calcutta"
+      - result.response.lcmConfig.refreshDetails.refreshInDays == 2
+      - result.response.lcmConfig.refreshDetails.refreshTime == "12:00:00"
+      - properties["db_parameter_profile_id"] == db_params_profile.uuid
+      - result.response.tags | length == 1
+      - result.response.tags[0].tagName == "{{tags.clones.name}}"
+      - result.response.tags[0].value == "ansible-test-db-clones"
+      - result.response.sourceSnapshotId == snapshot_uuid
+      - result.response.parentTimeMachineId == time_machine_uuid
+    fail_msg: "Unable to create clone"
+    success_msg: "Database clone created successfully"
+
+- set_fact:
+    clone_uuid: "{{result.uuid}}"
+
+- set_fact:
+    db_server_uuid: "{{result.response.databaseNodes[0].dbserverId}}"
+
+############################################ clone update and removal/refresh schedules related tests ###########################################
+
+
+- name: update name, desc, tags and schedules
+  ntnx_ndb_database_clones:
+    uuid: "{{clone_uuid}}"
+    name: "{{clone_db1}}-updated"
+    desc: "ansible-created-clone-updated"
+
+    removal_schedule:
+      timestamp: "2023-02-10 07:29:36"
+      timezone: "Asia/Calcutta"
+      remind_before_in_days: 4
+      delete_database: false
+
+    refresh_schedule:
+      days: 4
+      time: "14:00:00"
+      timezone: "Asia/Calcutta"
+
+    tags:
+      ansible-clones: ansible-test-db-clones-updated
+  register: result
+
+- name: Clone update status
+  assert:
+    that:
+      - result.response is defined
+      - result.changed == True
+      - result.uuid is defined
+      - result.uuid == result.response.id
+      - result.response.status == "READY"
+      - result.response.name == "{{clone_db1}}-updated"
+      - result.response.description == "ansible-created-clone-updated"
+      - result.response.lcmConfig.expiryDetails.expiryTimestamp == "2023-02-10 07:29:36"
+      - result.response.lcmConfig.expiryDetails.remindBeforeInDays == 4
+      - result.response.lcmConfig.expiryDetails.deleteDatabase == False
+      - result.response.lcmConfig.refreshDetails.refreshInDays == 4
+      - result.response.lcmConfig.refreshDetails.refreshTime == "14:00:00"
+      - result.response.tags | length == 1
+      - result.response.tags[0].tagName == "{{tags.clones.name}}"
+      - result.response.tags[0].value == "ansible-test-db-clones-updated"
+
+    fail_msg: "Unable to update clone"
+    success_msg: "Database clone updated successfully"
+
+- name: check idempotency
+  ntnx_ndb_database_clones:
+    uuid: "{{clone_uuid}}"
+    name: "{{clone_db1}}-updated"
+    desc: "ansible-created-clone-updated"
+
+    removal_schedule:
+      timestamp: "2023-02-10 07:29:36"
+      timezone: "Asia/Calcutta"
+      remind_before_in_days: 4
+      delete_database: false
+
+    refresh_schedule:
+      days: 4
+      time: "14:00:00"
+      timezone: "Asia/Calcutta"
+
+    tags:
+      ansible-clones: ansible-test-db-clones-updated
+  register: result
+
+- name: check idempotency status
+  assert:
+    that:
+      - result.changed == false
+      - result.failed == false
+      - "'Nothing to change' in result.msg"
+    fail_msg: "database clone got updated"
+    success_msg: "database clone update got skipped due to no state changes"
+
+- name: remove schedules
+  ntnx_ndb_database_clones:
+    uuid: "{{clone_uuid}}"
+
+    removal_schedule:
+      state: "absent"
+
+    refresh_schedule:
+      state: "absent"
+
+  register: result
+
+- name: Check schedule remove status
+  assert:
+    that:
+      - result.response is defined
+      - result.changed == True
+      - result.uuid is defined
+      - result.response.status == "READY"
+      - result.response.lcmConfig.expiryDetails == None
+      - result.response.lcmConfig.refreshDetails == None
+    fail_msg: "schedules update failed"
+    success_msg: "schedules removed successfully"
+
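
Both setting and removing these schedules land in the clone's C(lcmConfig), whose shape the check-mode spec earlier in this file shows. A small sketch of building that fragment from the module options, with field names taken from that expected_response rather than from the module source:

```python
def lcm_config(removal=None, refresh=None):
    """Build the lcmConfig fragment used in clone create/update payloads."""
    details = {}
    if removal:  # removal_schedule option -> expiryDetails
        details["expiryDetails"] = {
            "expireInDays": removal["days"],
            "expiryDateTimezone": removal["timezone"],
            "remindBeforeInDays": removal["remind_before_in_days"],
            "deleteDatabase": removal["delete_database"],
        }
    if refresh:  # refresh_schedule option -> refreshDetails
        details["refreshDetails"] = {
            "refreshInDays": refresh["days"],
            "refreshTime": refresh["time"],
            "refreshDateTimezone": refresh["timezone"],
        }
    return {"databaseLCMConfig": details}
```
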
schedule remove status + assert: + that: + - result.response is defined + - result.changed == True + - result.uuid is defined + - result.response.status == "READY" + - result.response.lcmConfig.expiryDetails == None + - result.response.lcmConfig.refreshDetails == None + fail_msg: "schedules removal failed" + success_msg: "schedules removed successfully" + +############################################ refresh clone ########################################### + + +- name: create spec for refresh clone to a pitr timestamp + check_mode: yes + ntnx_ndb_database_clone_refresh: + uuid: "{{clone_uuid}}" + pitr_timestamp: "2023-02-04 07:29:36" + timezone: "UTC" + register: result + + +- name: Check refresh db with pitr spec + assert: + that: + - result.response is defined + - result.changed == False + - result.response.userPitrTimestamp == "2023-02-04 07:29:36" + - result.response.timeZone == "UTC" + fail_msg: "creation of refresh db clone spec failed" + success_msg: "refresh db clone spec created successfully" + + +- name: refresh db clone + ntnx_ndb_database_clone_refresh: + uuid: "{{clone_uuid}}" + snapshot_uuid: "{{snapshot_uuid}}" + register: result + + + +- name: Check database refresh status + assert: + that: + - result.response is defined + - result.changed == True + - result.uuid is defined + - result.response.status == "READY" + fail_msg: "database refresh failed" + success_msg: "database refresh completed successfully" + +############################################ delete clone tests ########################################### + + +- name: create soft remove spec + check_mode: yes + ntnx_ndb_database_clones: + state: "absent" + uuid: "{{clone_uuid}}" + soft_remove: true + register: result + + + +- name: verify soft remove spec + assert: + that: + - result.changed == false + - result.failed == false + - result.response.delete == False + - result.response.remove == False + - result.response.softRemove == True + fail_msg: "creation of spec for soft remove failed" + success_msg: "spec for soft remove created successfully" + + + +- name: create unregistration spec + check_mode: yes + ntnx_ndb_database_clones: + state: "absent" + uuid: "{{clone_uuid}}" + register: result + + + +- name: verify unregistration spec + assert: + that: + - result.changed == false + - result.failed == false + - result.response.delete == False + - result.response.remove == True + - result.response.softRemove == False + fail_msg: "creation of spec for unregistration failed" + success_msg: "spec for unregistration created successfully" + +- name: delete clone db + ntnx_ndb_database_clones: + state: "absent" + uuid: "{{clone_uuid}}" + delete_from_vm: true + register: result + + + +- name: verify status of db clone delete + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + fail_msg: "database delete failed" + success_msg: "database deleted successfully" + +############################################ authorize and deauthorize db server vms ########################################### + + +- name: authorize db server vms + ntnx_ndb_authorize_db_server_vms: + time_machine: + name: "{{tm1}}" + db_server_vms: + - name: "{{vm1_name}}" + register: result + + + +- name: verify status of authorization of db server vms + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "success" + fail_msg: "database authorization with time machine failed" + success_msg: "database authorization with time machine completed successfully" +
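+# ntnx_ndb_authorize_db_server_vms attaches db server vms to a time machine so they can later host clones, and state: "absent" detaches them again (see the next task). A minimal sketch of the same call, assuming uuid-based references are accepted by the module alongside names: +# - ntnx_ndb_authorize_db_server_vms: +#     time_machine: +#       uuid: "<time_machine_uuid>" +#     db_server_vms: +#       - uuid: "<db_server_vm_uuid>" +#   register: result +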
+- name: deauthorize db server vms + ntnx_ndb_authorize_db_server_vms: + state: "absent" + time_machine: + name: "{{tm1}}" + db_server_vms: + - name: "{{vm1_name}}" + register: result + + + +- name: verify status of deauthorization of db server vms + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "success" + fail_msg: "database deauthorization with time machine failed" + success_msg: "database deauthorization with time machine completed successfully" + + +- name: authorize db server vms for hosting clone + ntnx_ndb_authorize_db_server_vms: + time_machine: + name: "{{tm1}}" + db_server_vms: + - name: "{{vm1_name}}" + register: result + + +- name: verify status of authorization of db server vms + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "success" + fail_msg: "database authorization with time machine failed" + success_msg: "database authorization with time machine completed successfully" + +############################################ clone on authorized db server vm ########################################### + + +- name: Get timestamp in secs + shell: "date +'%s'" + register: timestamp + +- set_fact: + timestamp: "{{timestamp.stdout|int + 60*60*48|int}}" + +- name: Get timestamp in datetime format + shell: "date -d @{{timestamp}} '+%Y-%m-%d %H:%M:%S'" + register: timestamp + +- set_fact: + timestamp: "{{timestamp.stdout}}" +
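+# The two shell tasks above derive a clone removal timestamp 48 hours in the future via the controller's date command. A shell-free sketch of the same computation, assuming the controller clock is an acceptable reference (as it is for the shell version), using Jinja2's now() and the strftime filter: +# - set_fact: +#     timestamp: "{{ '%Y-%m-%d %H:%M:%S' | strftime((now().timestamp() + 48*3600) | int) }}" +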
+- name: create clone using snapshot on authorized server + ntnx_ndb_database_clones: + name: "{{clone_db1}}" + desc: "ansible-created-clone" + + db_params_profile: + name: "{{db_params_profile.name}}" + + db_vm: + use_authorized_server: + name: "{{ vm1_name }}" + + postgres: + db_password: "{{vm_password}}" + + time_machine: + uuid: "{{time_machine_uuid}}" + snapshot_uuid: "{{snapshot_uuid}}" + + removal_schedule: + timestamp: "{{timestamp}}" + timezone: "Asia/Calcutta" + remind_before_in_days: 1 + delete_database: True + + refresh_schedule: + days: 2 + time: "12:00:00" + timezone: "Asia/Calcutta" + + tags: + ansible-clones: ansible-test-db-clones + register: result + + + +- name: Clone create status on authorized db server vm + assert: + that: + - result.response is defined + - result.changed == True + - result.uuid is defined + - result.uuid == result.response.id + - result.response.status == "READY" + - result.response.name == clone_db1 + - result.response.description == "ansible-created-clone" + - result.response.clone == True + - result.response.databaseNodes[0].dbserverId == db_server_uuid + - result.response.parentTimeMachineId == time_machine_uuid + fail_msg: "Unable to create clone" + success_msg: "Database clone created successfully" + + + +- set_fact: + delete_clone_uuid: "{{result.uuid}}" + +############################################ info module tests ################################## +- debug: + msg: Start testing ntnx_ndb_clones_info + +- name: List all era clones + ntnx_ndb_clones_info: + register: clones + +- name: check listing status + assert: + that: + - clones.response is defined + - clones.failed == false + - clones.changed == false + - clones.response | length > 0 + fail_msg: "Unable to list all era clones" + success_msg: "era clones listed successfully" +################################################################ +- name: get era clones using its name + ntnx_ndb_clones_info: + name: "{{clones.response[0].name}}" + register: result + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{clones.response[0].name}}" + fail_msg: "Unable to get era clones using its name" + success_msg: "got era clones using its name successfully" +################################################################ +- name: List clones using id + ntnx_ndb_clones_info: + uuid: "{{clones.response[0].id}}" + register: result + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{clones.response[0].name}}" + fail_msg: "Unable to get era clones using its id" + success_msg: "got era clones using its id successfully" +################################################################ + + +- name: get era clones with incorrect name + ntnx_ndb_clones_info: + name: "abcd" + register: result + no_log: true + ignore_errors: True + +- name: check listing status + assert: + that: + - result.error is defined + - result.failed == true + - result.changed == false + fail_msg: "module didn't error out correctly when incorrect name is given" + success_msg: "module errored out correctly when incorrect name is given" +############################################ cleanup ########################################### + +- name: delete clone db + ntnx_ndb_database_clones: + state: "absent" + uuid: "{{delete_clone_uuid}}" + delete_from_vm: true + register: result + + +- name: verify status of db clone delete + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + fail_msg: "database delete failed" + success_msg: "database deleted successfully" + + +- name: delete db server vm + ntnx_ndb_db_server_vms: + state: "absent" + uuid: "{{db_server_uuid}}" + delete_from_cluster: true + register: result + +- name: verify status of delete of db server vm used for clone + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + fail_msg: "db server vm delete failed" + success_msg: "db server vm deleted successfully" + + +- name: delete database created earlier + ntnx_ndb_databases: + state: "absent" + db_uuid: "{{db_uuid}}" + delete_db_server_vms: true + delete_time_machine: True + register: result + +- name: verify status of delete of database along with time machine and db server vm delete + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + - result.response.db_server_vms_delete_status.status == "5" + fail_msg: "database delete failed" + success_msg: "database deleted successfully" diff --git a/tests/integration/targets/ntnx_ndb_database_clones/tasks/main.yml b/tests/integration/targets/ntnx_ndb_database_clones/tasks/main.yml new file mode 100644 index 000000000..0d3a04a98 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_database_clones/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false + + block: + - import_tasks: "clones.yml" diff --git a/tests/integration/targets/ntnx_ndb_databases_actions/aliases b/tests/integration/targets/ntnx_ndb_databases_actions/aliases new file mode 100644 index 000000000..139597f9c --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_actions/aliases @@ -0,0 +1,2 @@ + + diff --git a/tests/integration/targets/ntnx_ndb_databases_actions/meta/main.yml b/tests/integration/targets/ntnx_ndb_databases_actions/meta/main.yml new file mode 
100644 index 000000000..ea2e9da19 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_actions/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_databases_actions/readme.md b/tests/integration/targets/ntnx_ndb_databases_actions/readme.md new file mode 100644 index 000000000..7e0698e1b --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_actions/readme.md @@ -0,0 +1,6 @@ +### Modules Tested: +1. ntnx_ndb_database_log_catchup +2. ntnx_ndb_database_restore +3. ntnx_ndb_database_snapshots +4. ntnx_ndb_database_scale +5. ntnx_ndb_linked_databases \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml new file mode 100644 index 000000000..31937aea3 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml @@ -0,0 +1,687 @@ +--- +# Summary: +# This playbook will test below cases: +# 1. Creation of single instance database +# 2. Create snapshot +# 3. Update and delete snapshots +# 4. Perform log catchup on database +# 5. Restore database to previously created snapshot and latest snapshot +# 6. Scale database +# 7. Add/Remove linked databases + +- debug: + msg: "start ndb database day2 actions tests" + +- name: Generate random name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + +- set_fact: + db1_name: "{{random_name[0]}}" + vm1_name: "{{random_name[0]}}-vm" + tm1: "{{random_name[0]}}-time-machine" + snapshot_name: "{{random_name[0]}}-snapshot" + +############################################ setup db ########################################### + + +- name: create single instance postgres database on new db server vm + ntnx_ndb_databases: + wait: true + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_params_profile: + name: "{{db_params_profile.name}}" + + db_vm: + create_new_server: + name: "{{ vm1_name }}-db" + desc: "vm for db server" + password: "{{ vm_password }}" + cluster: + name: "{{cluster.cluster1.name}}" + software_profile: + name: "{{ software_profile.name }}" + network_profile: + name: "{{ network_profile.name }}" + compute_profile: + name: "{{ compute_profile.name }}" + pub_ssh_key: "{{ public_ssh_key }}" + + postgres: + listener_port: "5432" + db_name: testAnsible1 + db_password: "{{ vm_password }}" + db_size: 200 + type: "single" + + time_machine: + name: "{{tm1}}" + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + + register: result + +- set_fact: + db_uuid: "{{result.db_uuid}}" + +- set_fact: + time_machine_uuid: "{{result.response.timeMachineId}}" + +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status == 'READY' + - result.db_uuid is defined + - result.changed == true + + fail_msg: "Unable to create single instance postgres database" + success_msg: "single instance postgres database created successfully" + +- set_fact: + db_server_uuid: "{{result.response.databaseNodes[0].dbserverId}}" + + +############################################ snapshots test ########################################### + +- name: create snapshot create spec + check_mode: yes + ntnx_ndb_database_snapshots: + name: "{{snapshot_name}}" + time_machine_uuid: "{{time_machine_uuid}}" + clusters: + - name: 
"{{cluster.cluster1.name}}" + - uuid: "test_uuid2" + - uuid: "test_uuid3" + expiry_days: 4 + register: result + +- set_fact: + expected_response: { + "changed": false, + "error": null, + "failed": false, + "response": { + "lcmConfig": { + "snapshotLCMConfig": { + "expiryDetails": { + "expireInDays": 4, + } + } + }, + "name": "{{snapshot_name}}", + "replicateToClusterIds": [ + "{{cluster.cluster1.uuid}}", + "test_uuid2", + "test_uuid3" + ] + }, + "snapshot_uuid": null + } + +- name: Check mode status + assert: + that: + - result.response is defined + - result.changed == False + - result.response == expected_response.response + + fail_msg: "Unable to create snapshot create spec" + success_msg: "Snapshot create spec generated successfully using check mode" + + +- name: create snapshot with minimal spec + ntnx_ndb_database_snapshots: + name: "{{snapshot_name}}1" + time_machine_uuid: "{{time_machine_uuid}}" + register: result + + +- name: snapshot create status + assert: + that: + - result.response is defined + - result.changed == True + - result.snapshot_uuid is defined + - result.response.name == "{{snapshot_name}}1" + - result.response.timeMachineId == time_machine_uuid + fail_msg: "Unable to create snapshot" + success_msg: "Snapshot created successfully" + +- name: create snapshot with expiry + ntnx_ndb_database_snapshots: + name: "{{snapshot_name}}2" + time_machine_uuid: "{{time_machine_uuid}}" + expiry_days: 4 + register: result + + +- set_fact: + snapshot_uuid: "{{result.snapshot_uuid}}" + +- name: snapshot create status + assert: + that: + - result.response is defined + - result.changed == True + - result.snapshot_uuid is defined + - result.response.name == "{{snapshot_name}}2" + - result.response.timeMachineId == time_machine_uuid + - result.response.lcmConfig.expiryDetails.expireInDays == 4 + fail_msg: "Unable to create snapshot with expiry config" + success_msg: "Snapshot with expiry config created succesfully" + + + +- name: rename snapshot + ntnx_ndb_database_snapshots: + snapshot_uuid: "{{snapshot_uuid}}" + name: "{{snapshot_name}}2-updated" + register: result + +- name: check rename status + assert: + that: + - result.response is defined + - result.changed == True + - result.snapshot_uuid is defined + - result.response.name == "{{snapshot_name}}2-updated" + + fail_msg: "Unable to rename snapshot" + success_msg: "Snapshot renamed successfully" + + + +- name: update expiry + ntnx_ndb_database_snapshots: + snapshot_uuid: "{{snapshot_uuid}}" + expiry_days: 5 + register: result + +- name: snapshot expiry update status + assert: + that: + - result.response is defined + - result.changed == True + - result.snapshot_uuid is defined + - result.response.lcmConfig.expiryDetails.expireInDays == 5 + + fail_msg: "Unable to update snapshot expiry" + success_msg: "snapshot expiry updated successfully" + + + +- name: remove expiry schedule + ntnx_ndb_database_snapshots: + snapshot_uuid: "{{snapshot_uuid}}" + remove_expiry: true + register: result + +- name: snapshot expiry update status + assert: + that: + - result.response is defined + - result.changed == True + - result.snapshot_uuid is defined + - result.response.lcmConfig == None + + fail_msg: "Unable to remove snapshot expiry schedule" + success_msg: "snapshot expiry schedule removed successfully" + + +- name: Add expiry schedule and rename + ntnx_ndb_database_snapshots: + snapshot_uuid: "{{snapshot_uuid}}" + name: "{{snapshot_name}}2" + expiry_days: 6 + register: result + +- name: snapshot update status + assert: + that: + - result.response 
is defined + - result.changed == True + - result.snapshot_uuid is defined + - result.response.name == "{{snapshot_name}}2" + - result.response.timeMachineId == time_machine_uuid + - result.response.lcmConfig.expiryDetails.expireInDays == 6 + + fail_msg: "Unable to add expiry schedule and rename it" + success_msg: "Snapshot updated successfully" + + +- name: Idempotency check + ntnx_ndb_database_snapshots: + snapshot_uuid: "{{snapshot_uuid}}" + expiry_days: 6 + register: result + +- name: check idempotency status + assert: + that: + - result.changed == false + - result.failed == false + - "'Nothing to change' in result.msg" + fail_msg: "snapshot got updated" + success_msg: "snapshot update got skipped due to no state changes" + + +############################################ log catchup ###################################### + +- name: create spec for log catchup + check_mode: yes + ntnx_ndb_database_log_catchup: + time_machine_uuid: "{{time_machine_uuid}}" + register: result + +- set_fact: + expected_response: { + "changed": false, + "error": null, + "failed": false, + "response": { + "actionArguments": [ + { + "name": "preRestoreLogCatchup", + "value": false + }, + { + "name": "switch_log", + "value": true + } + ], + "forRestore": false + } + } + + + +- name: Check mode status + assert: + that: + - result == expected_response + fail_msg: "Unable to create log catchup spec" + success_msg: "log catchup spec created successfully" + + +- name: create spec for log catchup for restore + check_mode: yes + ntnx_ndb_database_log_catchup: + time_machine_uuid: "{{time_machine_uuid}}" + for_restore: true + register: result + +- set_fact: + expected_response: { + "changed": false, + "error": null, + "failed": false, + "response": { + "actionArguments": [ + { + "name": "preRestoreLogCatchup", + "value": True + }, + { + "name": "switch_log", + "value": true + } + ], + "forRestore": true + } + } + + +- name: Check mode status + assert: + that: + - result == expected_response + fail_msg: "Unable to create log catchup spec" + success_msg: "log catchup spec created successfully" + + +- name: perform log catchup + ntnx_ndb_database_log_catchup: + time_machine_uuid: "{{time_machine_uuid}}" + for_restore: true + + register: result + +- name: verify log catchup status + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + fail_msg: "database log catchup failed" + success_msg: "database log catchup completed successfully" + +########################################### restore ########################################### +
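+# ntnx_ndb_database_restore picks its restore point from one of three inputs: a pitr_timestamp (with timezone), a snapshot_uuid, or neither, in which case latestSnapshot is set and the most recent snapshot is used. The three check mode specs below exercise each variant. +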
"changed": false, + "db_uuid": null, + "error": null, + "failed": false, + "response": { + "actionArguments": [ + { + "name": "sameLocation", + "value": true + } + ], + "latestSnapshot": true, + "snapshotId": null, + "timeZone": null, + "userPitrTimestamp": null + } + } + + +- name: Check mode status + assert: + that: + - result == expected_result + fail_msg: "Unable to create restore using latest snapshot spec" + success_msg: "Spec for databas restore using latest snapshot created successfully" + + + +- name: create restore database spec using snapshot uuid + check_mode: yes + ntnx_ndb_database_restore: + db_uuid: "{{db_uuid}}" + snapshot_uuid: "{{snapshot_uuid}}" + register: result + +- set_fact: + expected_result: { + "changed": false, + "db_uuid": null, + "error": null, + "failed": false, + "response": { + "actionArguments": [ + { + "name": "sameLocation", + "value": true + } + ], + "latestSnapshot": null, + "snapshotId": "{{snapshot_uuid}}", + "timeZone": null, + "userPitrTimestamp": null + } + } + +- name: Check mode status + assert: + that: + - result == expected_result + fail_msg: "Unable to create restore using snapshot uuid spec" + success_msg: "Spec for databas restore using snapshot uuid created successfully" + + +- name: perform restore using latest snapshot + ntnx_ndb_database_restore: + db_uuid: "{{db_uuid}}" + snapshot_uuid: "{{snapshot_uuid}}" + register: result + +- name: restore status + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + fail_msg: "Unable to restore database using latest snapshot" + success_msg: "database restored successfully using latest snapshot" + + +- name: perform restore using snapshot uuid + ntnx_ndb_database_restore: + db_uuid: "{{db_uuid}}" + snapshot_uuid: "{{snapshot_uuid}}" + register: result + +- name: restore status + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + fail_msg: "Unable to restore database using snapshot uuid" + success_msg: "database restored successfully using snapshot uuid" + +########################################### scaling ########################################### + +- name: create spec for scaling + check_mode: yes + ntnx_ndb_database_scale: + db_uuid: "{{db_uuid}}" + storage_gb: 10 + pre_update_cmd: "ls" + post_update_cmd: "ls -a" + + register: result + +- set_fact: + expected_result: { + "changed": false, + "db_uuid": null, + "error": null, + "failed": false, + "response": { + "actionArguments": [ + { + "name": "working_dir", + "value": "/tmp" + }, + { + "name": "data_storage_size", + "value": 10 + }, + { + "name": "pre_script_cmd", + "value": "ls" + }, + { + "name": "post_script_cmd", + "value": "ls -a" + } + ], + "applicationType": "postgres_database" + } + } + +- name: Check mode status + assert: + that: + - result == expected_result + fail_msg: "Unable to create database scaling spec" + success_msg: "Spec for database scaling with pre post commands created successfully" + + +- name: extend database storage for scaling database + ntnx_ndb_database_scale: + db_uuid: "{{db_uuid}}" + storage_gb: 2 + pre_update_cmd: "ls" + post_update_cmd: "ls -a" + + register: result + +- name: database scaling status + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + fail_msg: "Unable to extend database storage (scale)" + success_msg: "database storage extended (scaling) successfully" + +############################################ add / remove linked databases 
+############################################ add / remove linked databases ########################################### + + +- name: create databases in database instance + check_mode: yes + ntnx_ndb_linked_databases: + db_instance_uuid: "{{db_uuid}}" + databases: + - test1 + - test2 + - test3 + register: result + +- set_fact: + expected_result: { + "changed": false, + "db_instance_uuid": "{{db_uuid}}", + "error": null, + "failed": false, + "response": { + "databases": [ + { + "databaseName": "test1" + }, + { + "databaseName": "test2" + }, + { + "databaseName": "test3" + } + ] + } + } + +- name: Check mode status + assert: + that: + - result == expected_result + fail_msg: "Unable to create spec for adding databases in database instance" + success_msg: "Spec for adding databases in database instance created successfully" + + +- name: add databases in database instance + ntnx_ndb_linked_databases: + db_instance_uuid: "{{db_uuid}}" + databases: + - test1 + - test2 + register: result + +- name: create linked databases to its uuid map + set_fact: + linked_databases: "{{ linked_databases | default({}) | combine ({ item['name'] : item['id'] }) }}" + loop: "{{result.response}}" + no_log: true + + +- name: check linked database update status + assert: + that: + - result.changed == true + - result.db_instance_uuid is defined + - "'test1' in linked_databases" + - "'test2' in linked_databases" + fail_msg: "Unable to add database to database instance" + success_msg: "databases added to database instance successfully" + + +- name: remove database from database instance + ntnx_ndb_linked_databases: + state: "absent" + db_instance_uuid: "{{db_uuid}}" + database_uuid: "{{linked_databases.test1}}" + register: result + +- name: create linked database map + set_fact: + linked_databases: "{{ linked_databases | default({}) | combine ({ item['name'] : item['id'] }) }}" + loop: "{{result.response}}" + no_log: true + + +- name: check linked database update status + assert: + that: + - result.changed == true + - result.db_instance_uuid is defined + - "'test2' in linked_databases" + fail_msg: "Unable to remove database from database instance" + success_msg: "linked database removed from database instance successfully" + + +############################################ cleanup ########################################### + + +- name: delete database created earlier + ntnx_ndb_databases: + state: "absent" + db_uuid: "{{db_uuid}}" + delete_db_server_vms: true + delete_time_machine: True + register: result + +- name: verify status of delete of database along with time machine and db server vm delete + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + - result.response.db_server_vms_delete_status.status == "5" + fail_msg: "database delete failed" + success_msg: "database deleted successfully" diff --git a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/main.yml b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/main.yml new file mode 100644 index 000000000..3525a5c66 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false + + block: + - import_tasks: "all_actions.yml" diff --git a/tests/integration/targets/ntnx_ndb_databases_and_info/meta/main.yml b/tests/integration/targets/ntnx_ndb_databases_and_info/meta/main.yml deleted file mode 100644 index e4f447d3a..000000000 --- 
a/tests/integration/targets/ntnx_ndb_databases_and_info/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - prepare_env diff --git a/tests/integration/targets/ntnx_ndb_databases_and_info/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_databases_and_info/tasks/crud.yml deleted file mode 100644 index 6f00a0dbd..000000000 --- a/tests/integration/targets/ntnx_ndb_databases_and_info/tasks/crud.yml +++ /dev/null @@ -1,347 +0,0 @@ -- set_fact: - db_name: "{{ query('community.general.random_string', upper=false, numbers=false, special=false)[0] }}" - db_name_updated: "{{ query('community.general.random_string', upper=false, numbers=false, special=false)[0] }}" - desc: "ansible-created" - desc_updated: "ansible-created-updated" - vm_name: "{{ query('community.general.random_string', upper=false, numbers=false, special=false)[0] }}" - -- debug: - msg: "Starting ntnx_ndb_database crud tests" - -# ############################################################################################ - -- name: Create postgres database instance with new vm spec using check mode - check_mode: yes - ntnx_ndb_databases: - name: "{{db_name}}" - desc: "{{desc}}" - - db_params_profile: - uuid: "TEST_DB_PROFILE_UUID" - - db_vm: - create_new_server: - name: "{{vm_name}}" - password: "{{ndb.password}}" - cluster: - uuid: "TEST_CLUSTER_UUID" - software_profile: - uuid: "{{ndb.software_profile.uuid}}" - network_profile: - uuid: "TEST_NETWORK_UUID" - compute_profile: - uuid: "TEST_COMPUTE_UUID" - pub_ssh_key: "{{ndb.key}}" - - postgres: - listener_port: "5432" - db_name: ansible_test - db_password: "{{ndb.password}}" - db_size: 200 - pre_create_script: "ls" - post_create_script: "ls" - - time_machine: - name: POSTGRES_SERVER_PRAD_TM_1 - desc: POSTGRES_SERVER_PRAD_TM_1_DESC - sla: - uuid: "TEST_SLA_UUID" - schedule: - daily: "11:10:02" - weekly: WEDNESDAY - monthly: 4 - quaterly: JANUARY - yearly: FEBRUARY - log_catchup: 30 - snapshots_per_day: 2 - - tags: - test1: check1 - wait: true - no_log: true - register: db - -- set_fact: - expected_schedule: { - "continuousSchedule": { - "enabled": true, - "logBackupInterval": 30, - "snapshotsPerDay": 2 - }, - "monthlySchedule": { - "dayOfMonth": 4, - "enabled": true - }, - "quartelySchedule": { - "dayOfMonth": 4, - "enabled": true, - "startMonth": "JANUARY" - }, - "snapshotTimeOfDay": { - "hours": 11, - "minutes": 10, - "seconds": 2 - }, - "weeklySchedule": { - "dayOfWeek": "WEDNESDAY", - "enabled": true - }, - "yearlySchedule": { - "dayOfMonth": 4, - "enabled": true, - "month": "FEBRUARY" - } - } - -- name: Verify check mode generated spec - assert: - that: - - db.response is defined - - db.failed == false - - db.changed == false - - db.response.name == db_name - - db.response.databaseDescription == "{{desc}}" - - db.response.actionArguments is defined - - db.response.autoTuneStagingDrive == true - - db.response.clustered == false - - db.response.createDbserver == true - - db.response.nodeCount == 1 - - db.response.nodes | length == 1 - - db.response.nodes[0].networkProfileId == "TEST_NETWORK_UUID" - - db.response.nodes[0].vmName == "{{vm_name}}" - - db.response.dbParameterProfileId == "TEST_DB_PROFILE_UUID" - - db.response.softwareProfileId == "{{ndb.software_profile.uuid}}" - - db.response.softwareProfileVersionId is defined - - db.response.networkProfileId == "TEST_NETWORK_UUID" - - db.response.computeProfileId == "TEST_COMPUTE_UUID" - - db.response.timeMachineInfo is defined - - db.response.timeMachineInfo.name == "POSTGRES_SERVER_PRAD_TM_1" - - 
db.response.timeMachineInfo.schedule == expected_schedule - - db.response.timeMachineInfo.slaId == "TEST_SLA_UUID" - - db.response.databaseType == "postgres_database" - - db.response.tags[0]["value"] == "check1" - - db.response.tags[0]["tagName"] == "test1" - fail_msg: "Unable to create spec for database instance in check mode" - success_msg: "Created check mode spec for database instance successfully" - -# ############################################################################################ - -- name: Create postgres database instance using with new vm - ntnx_ndb_databases: - name: "{{db_name}}" - desc: "{{desc}}" - - db_params_profile: - name: "{{ndb.db_params}}" - - db_vm: - create_new_server: - name: "{{vm_name}}" - password: "{{ndb.password}}" - cluster: - name: "{{ndb.cluster}}" - software_profile: - name: "{{ndb.software_profile.name}}" - network_profile: - name: "{{ndb.network_profile}}" - compute_profile: - name: "{{ndb.compute_profile}}" - pub_ssh_key: "{{ndb.key}}" - - postgres: - listener_port: "5432" - db_name: ansible_test - db_password: "{{ndb.password}}" - db_size: 200 - pre_create_script: "ls" - post_create_script: "ls" - - time_machine: - name: POSTGRES_SERVER_PRAD_TM_1 - desc: POSTGRES_SERVER_PRAD_TM_1_DESC - sla: - name: "{{ndb.sla_name}}" - schedule: - daily: "11:10:02" - weekly: WEDNESDAY - monthly: 4 - quaterly: JANUARY - yearly: FEBRUARY - log_catchup: 30 - snapshots_per_day: 2 - - tags: - test1: check1 - wait: true - no_log: true - register: db - -- name: Verify attributes - assert: - that: - - db.response is defined - - db.failed == false - - db.changed == true - - db.db_uuid is defined - - db.response.status == "READY" - - db.response.tags | length == 1 - - db.response.type == "postgres_database" - - db.response.name == "{{db_name}}" - - db.response.description == "{{desc}}" - - db.response.tags[0]["value"] == "check1" - - db.response.tags[0]["tagName"] == "test1" - fail_msg: "Unable to create database instance" - success_msg: "Created database instance successfully" - -############################################################################################ - -- name: update db instance - ntnx_ndb_databases: - db_uuid: "{{db.db_uuid}}" - name: "{{db_name_updated}}" - desc: "{{desc_updated}}" - tags: - test1: check1_updated - test2: check2_updated - no_log: true - register: db - -- set_fact: - expected_tags: { - "test1": "check1_updated", - "test2": "check2_updated" - } - -- name: Verify attributes - assert: - that: - - db.response is defined - - db.failed == false - - db.changed == true - - db.db_uuid == "{{db.db_uuid}}" - - db.response.status == "READY" - - db.response.tags | length == 2 - - db.response.type == "postgres_database" - - db.response.name == "{{db_name_updated}}" - - db.response.description == "{{desc_updated}}" - - expected_tags[db.response.tags[0].tagName] == db.response.tags[0].value - - expected_tags[db.response.tags[1].tagName] == db.response.tags[1].value - - fail_msg: "Unable to update database instance" - success_msg: "Database instance updated successfully" - -############################################################################################ - -- name: idempotency check, update db instance with same spec - ntnx_ndb_databases: - db_uuid: "{{db.db_uuid}}" - name: "{{db_name_updated}}" - desc: "{{desc_updated}}" - tags: - test1: check1_updated - test2: check2_updated - register: result - no_log: true - ignore_errors: True - -- name: idempotency check status - assert: - that: - - result.changed == False - - result.failed == 
False - - "'Nothing to change' in result.msg" - fail_msg: "Idempotency check failed" - success_msg: "Database instance updated call skipped successfully" - -#####################################INFO Module tests####################################################### - -- debug: - msg: Start testing ntnx_ndb_databases_info based on created database - -- name: List era databases - ntnx_ndb_databases_info: - register: databases - no_log: true - -- name: check listing status - assert: - that: - - databases.response is defined - - databases.failed == false - - databases.changed == false - - databases.response | length > 0 - fail_msg: "Unable to list all era databases" - success_msg: "era databases listed successfully" -################################################################ -- name: Get era databases using its name - ntnx_ndb_databases_info: - name: "{{databases.response[0].name}}" - register: result - no_log: true - -- name: check listing status - assert: - that: - - result.response is defined - - result.failed == false - - result.changed == false - - result.response.id == "{{databases.response[0].id}}" - fail_msg: "Unable to Get era databases using its name" - success_msg: "Get era databases using its name finished successfully" -################################################################ -- name: Get era databases using its name - ntnx_ndb_databases_info: - uuid: "{{databases.response[0].id}}" - register: result - no_log: true - -- name: check listing status - assert: - that: - - result.response is defined - - result.failed == false - - result.changed == false - - result.response.name == "{{databases.response[0].name}}" - fail_msg: "Unable to Get era databases using its id" - success_msg: "Get era databases using its id finished successfully" - -################################################################ - -- name: get era database with incorrect name - ntnx_ndb_databases_info: - name: "xxxxxxx" - register: result - ignore_errors: True - no_log: true - -- name: check listing status - assert: - that: - - result.error is defined - - result.failed == true - - result.changed == false - fail_msg: "module didn't errored out correctly when incorrect name is given" - success_msg: "module errored out correctly when incorrect name is given" - -############################################################################################ - -- name: delete database - ntnx_ndb_databases: - state: "absent" - db_uuid: "{{db.db_uuid}}" - delete_time_machine: True - wait: true - no_log: true - register: db - -- name: Verify attributes - assert: - that: - - db.response is defined - - db.failed == false - - db.changed == true - - db.response.status == "5" - fail_msg: "Unable to delete database instance" - success_msg: "Database instance deleted successfully" - -############################################################################################ diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/aliases b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/aliases new file mode 100644 index 000000000..139597f9c --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/aliases @@ -0,0 +1,2 @@ + + diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/meta/main.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/meta/main.yml new file mode 100644 index 000000000..ea2e9da19 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/meta/main.yml @@ -0,0 +1,2 @@ 
+dependencies: + - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/readme.md b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/readme.md new file mode 100644 index 000000000..5b9468ff1 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/readme.md @@ -0,0 +1,3 @@ +### Modules Tested: +1. ntnx_ndb_databases +2. ntnx_ndb_register_database \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/main.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/main.yml new file mode 100644 index 000000000..d09f77ab1 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false + + block: + - import_tasks: "tests.yml" diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml new file mode 100644 index 000000000..817de3937 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml @@ -0,0 +1,825 @@ +--- +# Summary: +# This playbook will test below cases: +# 1. Single instance postgres database creation on new db server vm +# 2. Update database instance +# 3. Unregister database instance +# 4. Register the database created in point 1 from the db server vm created earlier + +- debug: + msg: "start ndb databases test flow 1" + +- name: Generate random name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + +- set_fact: + db1_name: "{{random_name[0]}}" + db1_name_updated: "{{random_name[0]}}-updated" + vm1_name: "{{random_name[0]}}-vm" + +################################### Single instance postgres database tests ############################# + + +- name: create spec for single instance postgres database on new db server vm + check_mode: yes + ntnx_ndb_databases: + wait: true + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_params_profile: + name: "{{db_params_profile.name}}" + + db_vm: + create_new_server: + name: "{{ vm1_name }}" + desc: vm for db server + password: "test_password" + cluster: + name: "{{cluster.cluster1.name}}" + software_profile: + name: "{{ software_profile.name }}" + network_profile: + name: "{{ network_profile.name }}" + compute_profile: + name: "{{ compute_profile.name }}" + pub_ssh_key: "test_key" + + postgres: + listener_port: "9999" + db_name: testAnsible + db_password: "test_password" + db_size: 200 + type: "single" + auto_tune_staging_drive: false + allocate_pg_hugepage: true + pre_create_script: "ls" + post_create_script: "ls -a" + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + auto_tune_staging_drive: False + tags: + ansible-databases: "single-instance-dbs" + + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + register: result +
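+# In check mode the module only renders the provisioning payload and returns it with changed == false; inputs marked no_log surface as VALUE_SPECIFIED_IN_NO_LOG_PARAMETER. The set_fact tasks below build the payload expected from the spec above so the rendered response can be compared field by field. +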
"value": "vm for db server" + }, + { + "name": "listener_port", + "value": "9999" + }, + { + "name": "auto_tune_staging_drive", + "value": false + }, + { + "name": "allocate_pg_hugepage", + "value": True + }, + { + "name": "cluster_database", + "value": false + }, + { + "name": "auth_method", + "value": "md5" + }, + { + "name": "db_password", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + }, + { + "name": "pre_create_script", + "value": "ls" + }, + { + "name": "post_create_script", + "value": "ls -a" + }, + { + "name": "database_names", + "value": "testAnsible" + }, + { + "name": "database_size", + "value": "200" + } + ] + +- set_fact: + expected_time_machine_info: { + "autoTuneLogDrive": true, + "description": "TM-desc", + "name": "TM1", + "schedule": { + "continuousSchedule": { + "enabled": true, + "logBackupInterval": 30, + "snapshotsPerDay": 2 + }, + "monthlySchedule": { + "dayOfMonth": 4, + "enabled": true + }, + "quartelySchedule": { + "dayOfMonth": 4, + "enabled": true, + "startMonth": "JANUARY" + }, + "snapshotTimeOfDay": { + "hours": 11, + "minutes": 10, + "seconds": 2 + }, + "weeklySchedule": { + "dayOfWeek": "WEDNESDAY", + "enabled": true + } + }, + "slaId": "{{sla.uuid}}" + } + +- set_fact: + mainetance_tasks: { + "maintenanceWindowId": "{{maintenance.window_uuid}}", + "tasks": [ + { + "payload": { + "prePostCommand": { + "postCommand": "ls -a", + "preCommand": "ls" + } + }, + "taskType": "OS_PATCHING" + }, + { + "payload": { + "prePostCommand": { + "postCommand": "ls -F", + "preCommand": "ls -l" + } + }, + "taskType": "DB_PATCHING" + } + ] + } + +- name: Check mode status + assert: + that: + - result.response is defined + - result.changed == False + - result.response.name == db1_name + - result.response.databaseDescription == "ansible-created-db-desc" + - result.response.actionArguments == expected_action_arguments + - result.response.computeProfileId == "{{compute_profile.uuid}}" + - result.response.networkProfileId == "{{network_profile.uuid}}" + - result.response.dbParameterProfileId == "{{db_params_profile.uuid}}" + - result.response.softwareProfileId == "{{software_profile.uuid}}" + - result.response.autoTuneStagingDrive == False + - result.response.timeMachineInfo == expected_time_machine_info + - result.response.nodes | length == 1 + - result.response.nodeCount == 1 + - result.response.nodes[0].nxClusterId == "{{cluster.cluster1.uuid}}" + - result.response.maintenanceTasks == mainetance_tasks + - result.response.createDbserver == True + fail_msg: "Unable to create single instance postgres database provision spec" + success_msg: "single instance postgres database provision spec created successfully" + + + +- name: create single instance postgres database on new db server vm + ntnx_ndb_databases: + wait: true + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_params_profile: + name: "{{db_params_profile.name}}" + + db_vm: + create_new_server: + ip: "{{ vm_ip }}" + name: "{{ vm1_name }}" + desc: "vm for db server" + password: "{{ vm_password }}" + cluster: + name: "{{cluster.cluster1.name}}" + software_profile: + name: "{{ software_profile.name }}" + network_profile: + name: "{{ static_network_profile.name }}" + compute_profile: + name: "{{ compute_profile.name }}" + pub_ssh_key: "{{ public_ssh_key }}" + + postgres: + listener_port: "5432" + db_name: testAnsible + db_password: "{{ vm_password }}" + db_size: 200 + type: "single" + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY 
+ monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + tags: + ansible-databases: "single-instance-dbs" + + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + register: result + +- set_fact: + db_uuid: "{{result.db_uuid}}" + +- name: create properties map + set_fact: + properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" + loop: "{{result.response.properties}}" + no_log: true + +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status == 'READY' + - result.db_uuid is defined + - result.changed == true + - result.response.name == db1_name + - result.response.description == "ansible-created-db-desc" + - result.response.type == "postgres_database" + - properties["vm_ip"] == vm_ip + - properties["listener_port"] == "5432" + - properties["db_parameter_profile_id"] == db_params_profile.uuid + - properties["auth"] == "md5" + - result.response.databaseNodes[0].status == "READY" + - result.response.tags | length == 1 + - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].value == "single-instance-dbs" + + fail_msg: "Unable to create single instance postgres database" + success_msg: "single instance postgres database created successfully" + +- set_fact: + db_server_uuid: "{{result.response.databaseNodes[0].dbserverId}}" + +- name: get vm details associated to the database instance created above and verify + ntnx_ndb_db_servers_info: + uuid: "{{db_server_uuid}}" + register: result + +- name: Verify DB server VM status + assert: + that: + - result.response is defined + - result.response.status == 'UP' + - result.changed == False + - result.response.name == vm1_name + - result.response.nxClusterId == cluster.cluster1.uuid + - result.response.description == "vm for db server" + fail_msg: "Unable to verify db server vm" + success_msg: "db server vm created by database instance creation verified successfully" + +################################### update tests ############################# + + +- name: update database with check mode + check_mode: yes + ntnx_ndb_databases: + wait: true + db_uuid: "{{db_uuid}}" + name: "{{db1_name_updated}}" + desc: "ansible-created-db-desc-updated" + + tags: + ansible-databases: "single-instance-dbs-updated" + register: result + +- name: check mode status + assert: + that: + - result.response is defined + - result.changed == False + - result.response.name == db1_name_updated + - result.response.description == "ansible-created-db-desc-updated" + + fail_msg: "Unable to create single instance postgres database update spec" + success_msg: "single instance postgres database update spec generated successfully" + +- name: update database + ntnx_ndb_databases: + wait: true + db_uuid: "{{db_uuid}}" + name: "{{db1_name_updated}}" + desc: "ansible-created-db-desc-updated" + + tags: + ansible-databases: "single-instance-dbs-updated" + register: result + +- name: update status + assert: + that: + - result.response is defined + - result.response.status == 'READY' + - result.db_uuid is defined + - result.changed == true + - result.response.name == db1_name_updated + - result.response.description == "ansible-created-db-desc-updated" + - result.response.tags | length == 1 + - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].value 
== "single-instance-dbs-updated" + + + fail_msg: "Unable to update single instance postgres database" + success_msg: "single instance postgres database updated successfully" + + +- name: idempotency checks + ntnx_ndb_databases: + wait: true + db_uuid: "{{db_uuid}}" + name: "{{db1_name_updated}}" + desc: "ansible-created-db-desc-updated" + + tags: + ansible-databases: "single-instance-dbs-updated" + register: result + +- name: check idempotency status + assert: + that: + - result.changed == false + - result.failed == false + - "'Nothing to change' in result.msg" + fail_msg: "database got updated" + success_msg: "database update skipped succesfully due to no changes in spec" + +################################### delete tests ############################# + +- name: create spec for delete db from vm + check_mode: yes + ntnx_ndb_databases: + state: "absent" + db_uuid: "{{db_uuid}}" + wait: true + delete_db_from_vm: true + register: result + +- name: verify delete check mode spec + assert: + that: + - result.changed == false + - result.failed == false + - result.response.delete == True + - result.response.remove == False + - result.response.deleteTimeMachine == False + fail_msg: "creation of spec for delete db from vm failed" + success_msg: "spec for delete db from vm created successfully" + + + +- name: create spec for soft remove + check_mode: yes + ntnx_ndb_databases: + state: "absent" + db_uuid: "{{db_uuid}}" + wait: true + soft_delete: true + delete_time_machine: true + register: result + +- name: verify soft remove check mode spec + assert: + that: + - result.changed == false + - result.failed == false + - result.response.delete == False + - result.response.remove == False + - result.response.softRemove == True + - result.response.deleteTimeMachine == True + fail_msg: "creation of spec for soft remove with time machine delete failed" + success_msg: "spec for soft remove with time machine delete created successfully" + + +- name: unregister db along with delete time machine + ntnx_ndb_databases: + state: "absent" + db_uuid: "{{db_uuid}}" + wait: true + delete_time_machine: true + register: result + +- name: verify status of unregister of database with time machine delete + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + fail_msg: "database unregistration failed" + success_msg: "database unregistered successfully" + +################################### single instance postgres database registration tests ############################# + + +- name: create spec for registering previously unregistered database from previously created VM's ip + check_mode: yes + ntnx_ndb_register_database: + wait: true + + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + auto_tune_staging_drive: False + db_vm: + registered: + ip: "10.10.10.10" + + postgres: + listener_port: "9999" + db_name: testAnsible1 + db_password: "{{vm_password}}" + software_path: "{{postgres.software_home}}" + db_user: "postgres" + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + + tags: + ansible-databases: "single-instance-dbs" + working_directory: "/check" + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + + register: result + +- 
+################################### single instance postgres database registration tests ############################# + + +- name: create spec for registering previously unregistered database from previously created VM's ip + check_mode: yes + ntnx_ndb_register_database: + wait: true + + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + auto_tune_staging_drive: False + db_vm: + registered: + ip: "10.10.10.10" + + postgres: + listener_port: "9999" + db_name: testAnsible1 + db_password: "{{vm_password}}" + software_path: "{{postgres.software_home}}" + db_user: "postgres" + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + + tags: + ansible-databases: "single-instance-dbs" + working_directory: "/check" + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + + register: result + +- set_fact: + expected_action_arguments: [ + { + "name": "listener_port", + "value": "9999" + }, + { + "name": "db_name", + "value": "testAnsible1" + }, + { + "name": "db_user", + "value": "postgres" + }, + { + "name": "db_password", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + }, + { + "name": "postgres_software_home", + "value": "{{postgres.software_home}}" + } + ] + +- set_fact: + expected_time_machine_info: { + "autoTuneLogDrive": true, + "description": "TM-desc", + "name": "TM1", + "schedule": { + "continuousSchedule": { + "enabled": true, + "logBackupInterval": 30, + "snapshotsPerDay": 2 + }, + "monthlySchedule": { + "dayOfMonth": 4, + "enabled": true + }, + "quartelySchedule": { + "dayOfMonth": 4, + "enabled": true, + "startMonth": "JANUARY" + }, + "snapshotTimeOfDay": { + "hours": 11, + "minutes": 10, + "seconds": 2 + }, + "weeklySchedule": { + "dayOfWeek": "WEDNESDAY", + "enabled": true + } + }, + "slaId": "{{sla.uuid}}" + } + +- set_fact: + maintenance_tasks: { + "maintenanceWindowId": "{{maintenance.window_uuid}}", + "tasks": [ + { + "payload": { + "prePostCommand": { + "postCommand": "ls -a", + "preCommand": "ls" + } + }, + "taskType": "OS_PATCHING" + }, + { + "payload": { + "prePostCommand": { + "postCommand": "ls -F", + "preCommand": "ls -l" + } + }, + "taskType": "DB_PATCHING" + } + ] + } + +- name: Check mode status + assert: + that: + - result.response is defined + - result.changed == False + - result.response.databaseName == db1_name + - result.response.description == "ansible-created-db-desc" + - result.response.actionArguments == expected_action_arguments + - result.response.databaseType == "postgres_database" + - result.response.autoTuneStagingDrive == False + - result.response.timeMachineInfo == expected_time_machine_info + - result.response.vmIp == "10.10.10.10" + - result.response.maintenanceTasks == maintenance_tasks + - result.response.workingDirectory == "/check" + fail_msg: "Unable to create register database spec" + success_msg: "single instance postgres database register spec created successfully" + + +- name: register previously unregistered database from previously created VM + ntnx_ndb_register_database: + wait: true + + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_vm: + registered: + name: "{{vm1_name}}" + + postgres: + db_name: testAnsible1 + db_password: "{{vm_password}}" + software_path: "{{postgres.software_home}}" + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + + tags: + ansible-databases: "single-instance-dbs" + + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + + register: result +
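+# result.response.properties is a list of {name: ..., value: ...} pairs; the task below folds it into a plain dict (e.g. properties["listener_port"]) so individual properties can be asserted directly. +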
+- name: create properties map + set_fact: + properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" + loop: "{{result.response.properties}}" + no_log: true + +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status == 'READY' + - result.db_uuid is defined + - result.changed == true + - result.response.name == db1_name + - result.response.description == "ansible-created-db-desc" + - properties["vm_ip"] == vm_ip + - properties["listener_port"] == "5432" + - result.response.databaseNodes | length == 1 + - result.response.databaseNodes[0].status == "READY" + - result.response.databaseNodes[0].dbserverId == db_server_uuid + - result.response.type == "postgres_database" + - result.response.tags | length == 1 + - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].value == "single-instance-dbs" + + fail_msg: "Unable to register single instance postgres database" + success_msg: "single instance postgres database registered successfully" + + +- set_fact: + db_uuid: "{{result.db_uuid}}" +#####################################INFO Module tests####################################################### + +- debug: + msg: Start testing ntnx_ndb_databases_info based on created database + +- name: List ndb databases + ntnx_ndb_databases_info: + register: databases + no_log: true + +- name: check listing status + assert: + that: + - databases.response is defined + - databases.failed == false + - databases.changed == false + - databases.response | length > 0 + fail_msg: "Unable to list all era databases" + success_msg: "era databases listed successfully" +################################################################ +- name: Get era databases using its name + ntnx_ndb_databases_info: + name: "{{databases.response[0].name}}" + register: result + no_log: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.id == "{{databases.response[0].id}}" + fail_msg: "Unable to get era databases using its name" + success_msg: "Get era databases using its name finished successfully" +################################################################ +- name: Get era databases using its id + ntnx_ndb_databases_info: + uuid: "{{databases.response[0].id}}" + register: result + no_log: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{databases.response[0].name}}" + fail_msg: "Unable to get era databases using its id" + success_msg: "Get era databases using its id finished successfully" + +################################################################ +- name: Get era databases using its id and detailed response + ntnx_ndb_databases_info: + filters: + detailed: True + uuid: "{{databases.response[0].id}}" + register: result + no_log: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{databases.response[0].name}}" + - result.response.timeMachine is defined + + fail_msg: "Unable to get era databases using its id" + success_msg: "Get era databases using its id finished successfully" + + +################################################################ + +- name: get era database with incorrect name + ntnx_ndb_databases_info: + name: "xxxxxxx" + register: result + ignore_errors: True + no_log: true + +- name: check listing status + assert: + that: + - result.error is defined + - result.failed == true + - result.changed == false + fail_msg: "module didn't error out correctly when incorrect name is given" + success_msg: "module errored out correctly when incorrect name is given" + +############################################################################################ + + +- name: unregister db along with delete time machine + ntnx_ndb_databases: + db_uuid: "{{db_uuid}}" + state: "absent" + wait: true + delete_time_machine: true + register: 
result + +- name: verify status of delete of database along with time machine delete + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + fail_msg: "database delete failed" + success_msg: "database deleted successfully" + + +- name: delete db server vm + ntnx_ndb_db_server_vms: + state: "absent" + wait: True + uuid: "{{db_server_uuid}}" + delete_from_cluster: True + delete_vgs: True + delete_vm_snapshots: True + register: result + +- name: check delete status + assert: + that: + - result.response is defined + - result.changed == True + - result.response.status == "5" + + fail_msg: "Unable to delete db server vm" + success_msg: "DB server VM deleted successfully" diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/aliases b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/aliases new file mode 100644 index 000000000..139597f9c --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/aliases @@ -0,0 +1,2 @@ + + diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/meta/main.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/meta/main.yml new file mode 100644 index 000000000..ea2e9da19 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/readme.md b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/readme.md new file mode 100644 index 000000000..5b9468ff1 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/readme.md @@ -0,0 +1,3 @@ +### Modules Tested: +1. ntnx_ndb_databases +2. ntnx_ndb_register_database \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/main.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/main.yml new file mode 100644 index 000000000..d09f77ab1 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false + + block: + - import_tasks: "tests.yml" diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml new file mode 100644 index 000000000..a181413eb --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml @@ -0,0 +1,379 @@ +--- +# Summary: +# This playbook will test the below cases: +# 1. Create DB server VM to host new database instance (vm1) +# 2. Create single instance postgres database (db1) on existing registered vm (vm1) +# 3. Unregister database instance (db1) and db server vm (vm1) +# 4. 
Register db1 from unregistered vm1 + +- debug: + msg: "start ndb databases test flow 2" + +- name: Generate random name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + +- set_fact: + db1_name: "{{random_name[0]}}" + db1_name_updated: "{{random_name[0]}}-updated" + vm1_name: "{{random_name[0]}}-vm" + +- name: create db server vm using software profile + ntnx_ndb_db_server_vms: + wait: True + name: "{{ vm1_name }}" + desc: "ansible-created-vm1-desc" + software_profile: + name: "{{ software_profile.name }}" + compute_profile: + name: "{{ compute_profile.name }}" + network_profile: + name: "{{ network_profile.name }}" + cluster: + name: "{{ cluster.cluster1.name }}" + password: "{{ vm_password }}" + pub_ssh_key: "{{ public_ssh_key }}" + time_zone: "UTC" + database_type: "postgres_database" + register: result + +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status == 'UP' + - result.uuid is defined + - result.changed == true + fail_msg: "Unable to create db server vm using software profile" + success_msg: "DB server VM created successfully" + +- set_fact: + db_server_uuid: "{{ result.uuid }}" + +- set_fact: + _vm_ip: "{{ result.response.ipAddresses[0] }}" + + +- name: create new single instance postgres database on vm created earlier + ntnx_ndb_databases: + wait: true + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_params_profile: + name: "{{db_params_profile.name}}" + + db_vm: + use_registered_server: + name: "{{ vm1_name }}" + + postgres: + db_name: testAnsible + db_password: "{{ vm_password }}" + db_size: 200 + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + + tags: + ansible-databases: "single-instance-dbs" + register: result + +- set_fact: + db_uuid: "{{result.db_uuid}}" + +- name: create properties map + set_fact: + properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" + loop: "{{result.response.properties}}" + no_log: true + +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status == 'READY' + - result.db_uuid is defined + - result.changed == true + - result.response.name == db1_name + - result.response.description == "ansible-created-db-desc" + - result.response.type == "postgres_database" + - properties["listener_port"] == "5432" + - properties["db_parameter_profile_id"] == db_params_profile.uuid + - result.response.databaseNodes[0].status == "READY" + - result.response.databaseNodes[0].dbserverId == db_server_uuid + - result.response.tags | length == 1 + - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].value == "single-instance-dbs" + - result.response.timeMachine is defined + - result.response.timeMachine.name == "TM1" + - result.response.timeMachine.description == "TM-desc" + - result.response.timeMachine.sla is defined + - result.response.timeMachine.sla.id == sla.uuid + + fail_msg: "Unable to create single instance postgres database" + success_msg: "single instance postgres database created successfully" + + +- name: unregister db along with delete time machine and unregister db server vm + ntnx_ndb_databases: + state: "absent" + db_uuid: "{{db_uuid}}" + wait: true + delete_time_machine: true + unregister_db_server_vms: True + register: result + +- name: verify status of unregister 
of database with time machine delete + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + - result.response.db_server_vms_delete_status.status == "5" + fail_msg: "database unregistration failed" + success_msg: "database unregistered successfully" + + +- name: create spec for registering previously unregistered DB from previously unregistered DB server vm + check_mode: yes + ntnx_ndb_register_database: + wait: true + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_vm: + unregistered: + ip: "{{_vm_ip}}" + username: "{{vm_username}}" + password: "{{vm_password}}" + desc: "vm-desc-updated" + reset_desc_in_ntnx_cluster: true + cluster: + name: "{{cluster.cluster1.name}}" + + postgres: + db_name: testAnsible1 + db_password: "{{vm_password}}" + software_path: "{{postgres.software_home}}" + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + + tags: + ansible-databases: "single-instance-dbs" + + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + + register: result + + +- set_fact: + expected_action_arguments: [ + { + "name": "vmIp", + "value": "{{_vm_ip}}" + }, + { + "name": "listener_port", + "value": "5432" + }, + { + "name": "db_name", + "value": "testAnsible1" + }, + { + "name": "db_user", + "value": "postgres" + }, + { + "name": "db_password", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + }, + { + "name": "postgres_software_home", + "value": "{{postgres.software_home}}" + } + ] + +- set_fact: + expected_time_machine_info: { + "autoTuneLogDrive": true, + "description": "TM-desc", + "name": "TM1", + "schedule": {}, + "slaId": "{{sla.uuid}}" + } + +- set_fact: + maintenance_tasks: { + "maintenanceWindowId": "{{maintenance.window_uuid}}", + "tasks": [ + { + "payload": { + "prePostCommand": { + "postCommand": "ls -a", + "preCommand": "ls" + } + }, + "taskType": "OS_PATCHING" + }, + { + "payload": { + "prePostCommand": { + "postCommand": "ls -F", + "preCommand": "ls -l" + } + }, + "taskType": "DB_PATCHING" + } + ] + } + +- name: Check mode status + assert: + that: + - result.response is defined + - result.changed == False + - result.response.databaseName == db1_name + - result.response.description == "ansible-created-db-desc" + - result.response.actionArguments == expected_action_arguments + - result.response.vmDescription == "vm-desc-updated" + - result.response.vmIp == _vm_ip + - result.response.vmPassword == "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + - result.response.vmUsername == vm_username + - result.response.resetDescriptionInNxCluster == True + - result.response.databaseType == "postgres_database" + - result.response.timeMachineInfo == expected_time_machine_info + - result.response.nxClusterId == cluster.cluster1.uuid + - result.response.maintenanceTasks == maintenance_tasks + - result.response.workingDirectory == "/tmp" + fail_msg: "Unable to create database registration spec" + success_msg: "single instance postgres database registration spec created successfully" + + + +- name: register previously unregistered DB from previously unregistered DB server vm + ntnx_ndb_register_database: + wait: true + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_vm: + unregistered: + ip: "{{_vm_ip}}" + username: "{{vm_username}}" + password: "{{vm_password}}" + desc: "vm-desc-updated" + reset_desc_in_ntnx_cluster: True + 
cluster: + name: "{{cluster.cluster1.name}}" + + postgres: + db_name: testAnsible1 + db_password: "{{vm_password}}" + software_path: "{{postgres.software_home}}" + db_user: "postgres" + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + + tags: + ansible-databases: "single-instance-dbs" + + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + + register: result + + +- name: create properties map + set_fact: + properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" + loop: "{{result.response.properties}}" + no_log: true + +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status == 'READY' + - result.db_uuid is defined + - result.changed == true + - result.response.name == db1_name + - result.response.description == "ansible-created-db-desc" + - properties["listener_port"] == "5432" + - properties["vm_ip"] == _vm_ip + - result.response.databaseNodes | length == 1 + - result.response.databaseNodes[0].status == "READY" + - result.response.type == "postgres_database" + - result.response.tags | length == 1 + - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].value == "single-instance-dbs" + - result.response.timeMachine is defined + - result.response.timeMachine.name == "TM1" + - result.response.timeMachine.description == "TM-desc" + - result.response.timeMachine.sla is defined + - result.response.timeMachine.sla.id == sla.uuid + + fail_msg: "Unable to register single instance postgres database" + success_msg: "single instance postgres database registered successfully" + +- name: delete db along with delete time machine and db server vms + ntnx_ndb_databases: + db_uuid: "{{result.db_uuid}}" + state: "absent" + wait: true + delete_time_machine: true + unregister_db_server_vms: True + register: result + +- name: verify status of delete of database along with time machine delete + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + - result.response.db_server_vms_delete_status.status == "5" + fail_msg: "database delete failed" + success_msg: "database deleted successfully" \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_db_server_vms/aliases b/tests/integration/targets/ntnx_ndb_db_server_vms/aliases new file mode 100644 index 000000000..139597f9c --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_db_server_vms/aliases @@ -0,0 +1,2 @@ + + diff --git a/tests/integration/targets/ntnx_ndb_db_server_vms/meta/main.yml b/tests/integration/targets/ntnx_ndb_db_server_vms/meta/main.yml new file mode 100644 index 000000000..ea2e9da19 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_db_server_vms/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_db_server_vms/readme.md b/tests/integration/targets/ntnx_ndb_db_server_vms/readme.md new file mode 100644 index 000000000..465d8df4c --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_db_server_vms/readme.md @@ -0,0 +1,6 @@ +### Summary +This test flow tests db server vm provision, unregister, register, update and delete scenarios. + +### Modules Tested: +1. 
ntnx_ndb_db_server_vms +2. ntnx_ndb_register_db_server_vm \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml new file mode 100644 index 000000000..5b0cc18e7 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml @@ -0,0 +1,950 @@ +--- + +- debug: + msg: "start ntnx_ndb_db_server_vms, ntnx_ndb_register_db_server_vm, ntnx_ndb_db_servers_info and ntnx_ndb_maintenance_tasks tests. Approx Time: < 30 mins" + +- name: Generate random name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + +- set_fact: + vm1_name: "{{random_name[0]}}" + vm1_name_updated: "{{random_name[0]}}-updated" + +################################### DB server VM Provision tests ############################# +- name: create ndb vm using time machine and check mode + check_mode: yes + ntnx_ndb_db_server_vms: + wait: True + name: "ansible-created-vm1-from-time-machine" + desc: "ansible-created-vm1-from-time-machine-time-machine" + time_machine: + uuid: "test_uuid" + snapshot_uuid: "test_snapshot_uuid" + compute_profile: + uuid: "test_compute_uuid" + network_profile: + uuid: "test_network_uuid" + cluster: + uuid: "test_cluster_uuid" + password: "test_password" + pub_ssh_key: "test_public_key" + database_type: "postgres_database" + automated_patching: + maintenance_window: + uuid: "test_window_uuid" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + + register: check_mode_result + +- name: create action_arguments map + set_fact: + action_arguments: "{{ action_arguments | default({}) | combine ({ item['name'] : item['value'] }) }}" + loop: "{{check_mode_result.response.actionArguments}}" + no_log: true + +- set_fact: + maintenance_tasks: { + "maintenanceWindowId": "test_window_uuid", + "tasks": [ + { + "payload": { + "prePostCommand": { + "postCommand": "ls -a", + "preCommand": "ls" + } + }, + "taskType": "OS_PATCHING" + }, + { + "payload": { + "prePostCommand": { + "postCommand": "ls -F", + "preCommand": "ls -l" + } + }, + "taskType": "DB_PATCHING" + } + ] + } + +- set_fact: + expected_result: { + "changed": false, + "error": null, + "failed": false, + "response": { + "actionArguments": [ + { + "name": "vm_name", + "value": "ansible-created-vm1-from-time-machine" + }, + { + "name": "client_public_key", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + } + ], + "computeProfileId": "test_compute_uuid", + "databaseType": "postgres_database", + "description": "ansible-created-vm1-from-time-machine-time-machine", + "latestSnapshot": false, + "maintenanceTasks": { + "maintenanceWindowId": "test_window_uuid", + "tasks": [ + { + "payload": { + "prePostCommand": { + "postCommand": "ls -a", + "preCommand": "ls" + } + }, + "taskType": "OS_PATCHING" + }, + { + "payload": { + "prePostCommand": { + "postCommand": "ls -F", + "preCommand": "ls -l" + } + }, + "taskType": "DB_PATCHING" + } + ] + }, + "networkProfileId": "test_network_uuid", + "nxClusterId": "test_cluster_uuid", + "snapshotId": "test_snapshot_uuid", + "softwareProfileId": "", + "softwareProfileVersionId": "", + "timeMachineId": "test_uuid", + "timeZone": "Asia/Calcutta", + "vmPassword": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + }, + "uuid": null + } + +- name: Check mode Status + assert: + that: + - check_mode_result == expected_result + fail_msg: "Unable to generate create 
db server vm spec with time machine as source" + success_msg: "DB server VM spec created successfully" + + +- name: create spec for db server vm using software profile and profile names + check_mode: yes + ntnx_ndb_db_server_vms: + wait: True + name: "{{ vm1_name }}" + desc: "ansible-created-vm1-desc" + software_profile: + name: "{{ software_profile.name }}" + compute_profile: + name: "{{ compute_profile.name }}" + network_profile: + name: "{{ network_profile.name }}" + cluster: + name: "{{ cluster.cluster1.name }}" + password: "{{ vm_password }}" + pub_ssh_key: "{{ public_ssh_key }}" + time_zone: "UTC" + database_type: "postgres_database" + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + register: result + +- set_fact: + expected_result: { + "changed": false, + "error": null, + "failed": false, + "response": { + "actionArguments": [ + { + "name": "vm_name", + "value": "{{ vm1_name }}" + }, + { + "name": "client_public_key", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + } + ], + "computeProfileId": "{{ compute_profile.uuid }}", + "databaseType": "postgres_database", + "description": "ansible-created-vm1-desc", + "latestSnapshot": false, + "maintenanceTasks": { + "maintenanceWindowId": "{{ maintenance.window_uuid }}", + "tasks": [ + { + "payload": { + "prePostCommand": { + "postCommand": "ls -a", + "preCommand": "ls" + } + }, + "taskType": "OS_PATCHING" + }, + { + "payload": { + "prePostCommand": { + "postCommand": "ls -F", + "preCommand": "ls -l" + } + }, + "taskType": "DB_PATCHING" + } + ] + }, + "networkProfileId": "{{ network_profile.uuid }}", + "nxClusterId": "{{ cluster.cluster1.uuid }}", + "softwareProfileId": "{{ software_profile.uuid }}", + "softwareProfileVersionId": "{{ software_profile.latest_version_id }}", + "timeZone": "UTC", + "vmPassword": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + }, + "uuid": null + } + +- name: Check mode Status + assert: + that: + - result == expected_result + fail_msg: "Unable to generate create db server vm spec with software profile as source and given profile names" + success_msg: "DB server VM spec created successfully" + + +- name: create db server vm using software profile + ntnx_ndb_db_server_vms: + wait: True + name: "{{ vm1_name }}" + desc: "ansible-created-vm1-desc" + software_profile: + name: "{{ software_profile.name }}" + compute_profile: + name: "{{ compute_profile.name }}" + network_profile: + name: "{{ network_profile.name }}" + cluster: + name: "{{ cluster.cluster1.name }}" + password: "{{ vm_password }}" + pub_ssh_key: "{{ public_ssh_key }}" + time_zone: "UTC" + database_type: "postgres_database" + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + register: result + + +- name: create properties map + set_fact: + properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" + loop: "{{result.response.properties}}" + no_log: true + +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status == 'UP' + - result.uuid is defined + - result.changed == true + - result.response.name == vm1_name + - result.response.description == "ansible-created-vm1-desc" + - result.response.ipAddresses | length > 0 + - 
properties["application_type"] == "postgres_database" + - result.response.vmTimeZone == "UTC" + - result.response.nxClusterId == cluster.cluster1.uuid + + fail_msg: "Unable to create db server vm using software profile" + success_msg: "DB server VM created successfully" + +- set_fact: + db_server_uuid: "{{ result.uuid }}" + +- set_fact: + vm_ip: "{{ result.response.ipAddresses[0] }}" + + +################################### DB server VM update Tests ############################# + +- name: update db server vm name, desc, credentials, tags + ntnx_ndb_db_server_vms: + wait: True + uuid: "{{db_server_uuid}}" + name: "{{vm1_name_updated}}" + desc: "ansible-created-vm1-updated-desc" + reset_name_in_ntnx_cluster: True + reset_desc_in_ntnx_cluster: True + update_credentials: + - username: "{{vm_username}}" + password: "{{vm_password}}" + tags: + ansible-db-server-vms: ansible-updated + register: result + +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status == 'UP' + - result.uuid is defined + - result.changed == true + - result.response.name == vm1_name_updated + - result.response.description == "ansible-created-vm1-updated-desc" + - result.response.ipAddresses | length > 0 + - result.response.vmClusterName == vm1_name_updated + - result.response.tags[0].tagName == tags.db_server_vm.name + - result.response.tags[0].tagId == tags.db_server_vm.uuid + - result.response.tags[0].value == "ansible-updated" + + fail_msg: "Unable to update db server vm" + success_msg: "DB server VM updated successfully" + +- name: check idempotency + ntnx_ndb_db_server_vms: + wait: True + uuid: "{{db_server_uuid}}" + name: "{{vm1_name_updated}}" + desc: "ansible-created-vm1-updated-desc" + tags: + ansible-db-server-vms: "ansible-updated" + register: result + +- name: check idempotency status + assert: + that: + - result.changed == false + - result.failed == false + - "'Nothing to change' in result.msg" + fail_msg: "db server vm got updated" + success_msg: "db server vm update skipped succesfully due to no changes in state" + + +- name: update db server vm name with check mode and check defaults + check_mode: yes + ntnx_ndb_db_server_vms: + wait: True + uuid: "{{db_server_uuid}}" + update_credentials: + - username: "user" + password: "pass" + register: result + +- name: check mode Status + assert: + that: + - result.response is defined + - result.changed == false + - result.response.resetCredential == True + - result.response.credentials | length == 1 + - result.response.credentials[0].username == "user" + - result.response.resetNameInNxCluster == False + - result.response.resetDescriptionInNxCluster == False + - result.response.resetTags == False + + fail_msg: "Unable to generate check mode spec for update" + success_msg: "DB server VM update spec generated successfully" + +################################################################ Info module tests ############################################################# + +- name: List NDB db_servers + ntnx_ndb_db_servers_info: + register: db_servers + + +- name: check listing status + assert: + that: + - db_servers.response is defined + - db_servers.failed == false + - db_servers.changed == false + - db_servers.response | length > 0 + fail_msg: "Unable to list all NDB db_servers" + success_msg: "NDB db_servers listed successfully" +################################################################ +- name: get NDB db_servers using it's name + ntnx_ndb_db_servers_info: + filters: + load_metrics: true + load_databases: True + 
value_type: name + value: "{{db_servers.response[0].name}}" + register: result + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response[0].id == "{{db_servers.response[0].id}}" + - result.response[0].databases is defined + - result.response[0].name == "{{db_servers.response[0].name}}" + - result.response[0].metric is defined + fail_msg: "Unable to get NDB db_servers using its name and filters " + success_msg: "get NDB db_server using its name and filters finished successfully" + +################################################################ + +- name: get NDB db_servers using incorrect name + ntnx_ndb_db_servers_info: + filters: + load_metrics: true + load_databases: True + value_type: name + value: "xxxxxxxxxxxxxx" + register: result + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response | length == 0 + + fail_msg: "listing ndb db servers using incorrect name didn't fail" + success_msg: "Got empty response for incorrect name successfully" +################################################################ +- name: get NDB db_servers using its ip + ntnx_ndb_db_servers_info: + filters: + value_type: ip + value: "{{db_servers.response[0].ipAddresses[0]}}" + register: result + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response[0].id == "{{db_servers.response[0].id}}" + - result.response[0].ipAddresses[0] == "{{db_servers.response[0].ipAddresses[0]}}" + fail_msg: "Unable to get NDB db_servers using its ip " + success_msg: "get NDB db_server using its ip finished successfully" +################################################################ +- name: get NDB db_servers using its name + ntnx_ndb_db_servers_info: + name: "{{db_servers.response[0].name}}" + register: result + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.id == "{{db_servers.response[0].id}}" + - result.response.ipAddresses[0] == "{{db_servers.response[0].ipAddresses[0]}}" + fail_msg: "Unable to get NDB db_servers using its name " + success_msg: "get NDB db_server using its name finished successfully" +################################################################ +- name: get NDB db_servers using its id + ntnx_ndb_db_servers_info: + uuid: "{{db_servers.response[0].id}}" + register: result + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{db_servers.response[0].name}}" + - result.response.ipAddresses[0] == "{{db_servers.response[0].ipAddresses[0]}}" + fail_msg: "Unable to get NDB db_servers using its id " + success_msg: "get NDB db_server using its id finished successfully" +################################################################ +- name: get NDB db_servers using ip + ntnx_ndb_db_servers_info: + server_ip: "{{db_servers.response[0].ipAddresses[0]}}" + register: result + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{db_servers.response[0].name}}" + - result.response.id == "{{db_servers.response[0].id}}" + fail_msg: "Unable to get NDB db_servers using its ip " + 
success_msg: "get NDB db_server using it's ip finished successfully" + +################################################################ + +- name: get NDB db_servers with incorrect name + ntnx_ndb_db_servers_info: + name: "abcd" + register: result + no_log: true + ignore_errors: True + +- name: check listing status + assert: + that: + - result.error is defined + - result.failed == true + - result.changed == false + fail_msg: "module didn't errored out correctly when incorrect name is given" + success_msg: "module errored out correctly when incorrect name is given" + + +################################### maintenance tasks update tests ############################# + +- name: create spec for adding maintenance window tasks to db server vm + check_mode: yes + ntnx_ndb_maintenance_tasks: + db_server_vms: + - name: "{{vm1_name_updated}}" + - uuid: "test_vm_1" + db_server_clusters: + - uuid: "test_cluter_1" + - uuid: "test_cluter_2" + maintenance_window: + name: "{{maintenance.window_name}}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls -a" + post_task_cmd: "ls" + - type: "DB_PATCHING" + pre_task_cmd: "ls -a" + post_task_cmd: "ls" + register: result + +- set_fact: + expected_result: { + "changed": false, + "error": null, + "failed": false, + "response": { + "entities": { + "ERA_DBSERVER": [ + "{{db_server_uuid}}", + "test_vm_1" + ], + "ERA_DBSERVER_CLUSTER": [ + "test_cluter_1", + "test_cluter_2" + ] + }, + "maintenanceWindowId": "{{maintenance.window_uuid}}", + "tasks": [ + { + "payload": { + "prePostCommand": { + "postCommand": "ls", + "preCommand": "ls -a" + } + }, + "taskType": "OS_PATCHING" + }, + { + "payload": { + "prePostCommand": { + "postCommand": "ls", + "preCommand": "ls -a" + } + }, + "taskType": "DB_PATCHING" + } + ] + }, + "uuid": "{{maintenance.window_uuid}}" + } + +- name: Check mode status + assert: + that: + - result == expected_result + + fail_msg: "Unable to create spec for adding maintenance tasks for db server vm" + success_msg: "spec for adding maintenance tasks for db server vm created successfully" + + +- name: create spec for removing maintenance window tasks from above created vm + check_mode: yes + ntnx_ndb_maintenance_tasks: + db_server_vms: + - uuid: "{{db_server_uuid}}" + maintenance_window: + uuid: "{{maintenance.window_uuid}}" + tasks: [] + register: result + +- set_fact: + expected_result: { + "changed": false, + "error": null, + "failed": false, + "response": { + "entities": { + "ERA_DBSERVER": [ + "{{db_server_uuid}}" + ] + }, + "maintenanceWindowId": "{{maintenance.window_uuid}}", + "tasks": [], + }, + "uuid": "{{maintenance.window_uuid}}" + } + +- name: Check mode status + assert: + that: + - result == expected_result + + fail_msg: "Unable to create spec for removing maintenance tasks for db server vm" + success_msg: "spec for removing maintenance tasks for db server vm created successfully" + + +- name: db server vm already contains some tasks so remove maintenance window tasks from above created vm + ntnx_ndb_maintenance_tasks: + db_server_vms: + - uuid: "{{db_server_uuid}}" + maintenance_window: + uuid: "{{maintenance.window_uuid}}" + tasks: [] + register: result + +- set_fact: + tasks: [] + entity_tasks_associations: "{{result.response.entityTaskAssoc}}" + +- name: entity_tasks_associations can be null so converting it to list + set_fact: + entity_tasks_associations: [] + when: entity_tasks_associations == "" + +- name: creating list of tasks associated to db server vm + set_fact: + tasks: "{{ tasks | default([]) | union ([item]]) }}" + loop: 
"{{entity_tasks_associations}}" + when: item['entity']['id'] == db_server_uuid + no_log: true + +- name: Check update status + assert: + that: + - result.changed == true + - result.failed == false + - tasks | length == 0 + + fail_msg: "Unable to remove maintenance tasks for given db server vm" + success_msg: "maintenance tasks for given db server vm removed successfully" + + +- name: Add maitenance window task for vm + ntnx_ndb_maintenance_tasks: + db_server_vms: + - name: "{{vm1_name_updated}}" + maintenance_window: + name: "{{maintenance.window_name}}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls -a" + post_task_cmd: "ls" + - type: "DB_PATCHING" + pre_task_cmd: "ls -a" + post_task_cmd: "ls" + register: result + +- set_fact: + tasks: [] + +- set_fact: + entity_tasks_associations: "{{result.response.entityTaskAssoc}}" + +- name: entity_tasks_associations can be null so converting it to list + set_fact: + entity_tasks_associations: [] + when: entity_tasks_associations == None + +- name: creating list of tasks associated with db server vm + set_fact: + tasks: "{{ tasks | default([]) | union ([item]) }}" + loop: "{{entity_tasks_associations}}" + when: item['entity']['id'] == db_server_uuid + no_log: true + +- name: Check update status + assert: + that: + - result.changed == true + - result.failed == false + - tasks | length == 2 + + fail_msg: "Unable to add maintenance tasks for given db server vm" + success_msg: "maintenance tasks for given db server vm added successfully" + +- name: Remove maintenance window tasks from above created vm + ntnx_ndb_maintenance_tasks: + db_server_vms: + - uuid: "{{db_server_uuid}}" + maintenance_window: + uuid: "{{maintenance.window_uuid}}" + tasks: [] + register: result + +- name: Check update status + assert: + that: + - result.changed == true + - result.failed == false + + fail_msg: "Unable to remove maintenance tasks for given db server vm" + success_msg: "maintenance tasks for given db server vm removed successfully" + + +################################### DB server VM unregistration tests ############################# + +- name: generate check mode spec for unregister with default values + check_mode: yes + ntnx_ndb_db_server_vms: + state: "absent" + wait: True + uuid: "{{db_server_uuid}}" + register: result + +- name: check mode Status + assert: + that: + - result.response is defined + - result.changed == false + - result.response.delete == False + - result.response.deleteVgs == False + - result.response.deleteVmSnapshots == False + - result.response.softRemove == False + - result.response.remove == True + + fail_msg: "Unable to generate check mode spec for unregister" + success_msg: "DB server VM unregister spec generated successfully" + + +- name: genereate check mode spec for delete vm with vgs and snapshots + check_mode: yes + ntnx_ndb_db_server_vms: + state: "absent" + uuid: "{{db_server_uuid}}" + delete_from_cluster: True + delete_vgs: True + delete_vm_snapshots: True + register: result + +- name: check mode Status + assert: + that: + - result.response is defined + - result.changed == false + - result.response.delete == True + - result.response.deleteVgs == True + - result.response.deleteVmSnapshots == True + - result.response.softRemove == False + - result.response.remove == False + + fail_msg: "Unable to generate check mode spec for unregister" + success_msg: "DB server VM update spec generated successfully" + + +- name: unregister vm + ntnx_ndb_db_server_vms: + state: "absent" + wait: True + uuid: "{{db_server_uuid}}" + 
delete_from_cluster: False + soft_remove: True + delete_vgs: True + delete_vm_snapshots: True + register: result + +- name: check unregister status + assert: + that: + - result.response is defined + - result.changed == True + + fail_msg: "Unable to soft remove db server vm" + success_msg: "DB server VM removed successfully" + +################################### DB server VM Registration tests ############################# + + +- name: generate spec for registration of the previous unregistered vm using check mode + check_mode: yes + ntnx_ndb_register_db_server_vm: + ip: "{{vm_ip}}" + desc: "register-vm-desc" + reset_desc_in_ntnx_cluster: true + cluster: + name: "{{cluster.cluster1.name}}" + postgres: + software_path: "{{postgres.software_home}}" + private_ssh_key: "check-key" + username: "{{vm_username}}" + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + working_directory: "/check" + register: result + +- set_fact: + action_arguments: {} + +- name: create action_arguments map + set_fact: + action_arguments: "{{ action_arguments | default({}) | combine ({ item['name'] : item['value'] }) }}" + loop: "{{result.response.actionArguments}}" + no_log: true + +- set_fact: + maintenance_tasks: { + "maintenanceWindowId": "{{maintenance.window_uuid}}", + "tasks": [ + { + "payload": { + "prePostCommand": { + "postCommand": "ls -a", + "preCommand": "ls" + } + }, + "taskType": "OS_PATCHING" + }, + { + "payload": { + "prePostCommand": { + "postCommand": "ls -F", + "preCommand": "ls -l" + } + }, + "taskType": "DB_PATCHING" + } + ] + } + +- name: Check mode status + assert: + that: + - result.response is defined + - result.changed == false + - result.response.description == "register-vm-desc" + - result.response.nxClusterUuid == cluster.cluster1.uuid + - result.response.resetDescriptionInNxCluster == true + - result.response.workingDirectory == "/check" + - result.response.vmIp == vm_ip + - result.response.username == vm_username + - action_arguments["listener_port"] == "5432" + - action_arguments["postgres_software_home"] == postgres.software_home + - result.response.maintenanceTasks == maintenance_tasks + + fail_msg: "Unable to create spec for db server vm registration" + success_msg: "DB server VM registration spec generated successfully" + + +- name: register the previous unregistered vm + ntnx_ndb_register_db_server_vm: + ip: "{{vm_ip}}" + desc: "register-vm-desc" + cluster: + name: "{{cluster.cluster1.name}}" + postgres: + listener_port: 5432 + software_path: "{{postgres.software_home}}" + username: "{{vm_username}}" + password: "{{vm_password}}" + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + + register: result + +- name: create properties map + set_fact: + properties1: "{{ properties1 | default({}) | combine ({ item['name'] : item['value'] }) }}" + loop: "{{result.response.properties}}" + no_log: true + +- name: Registration Status + assert: + that: + - result.response is defined + - result.response.status == 'UP' + - result.uuid is defined + - result.changed == true + - result.response.name == vm1_name_updated + - result.response.description == "register-vm-desc" + - result.response.ipAddresses | length > 0 + - 
result.response.ipAddresses[0] == vm_ip + - properties1["era_user"] == "{{vm_username}}" + - properties1["listener_port"] == "5432" + - properties1["postgres_software_home"] == postgres.software_home + - properties1["working_dir"] == "/tmp" + - properties1["application_type"] == "postgres_database" + - result.response.nxClusterId == cluster.cluster1.uuid + + fail_msg: "Unable to register db server vm" + success_msg: "DB server VM registered successfully" + + +- set_fact: + db_server_uuid: "{{result.uuid}}" + +################################### DB server VM Delete test ############################# + + +- name: unregister db server vm + ntnx_ndb_db_server_vms: + state: "absent" + wait: True + uuid: "{{db_server_uuid}}" + delete_from_cluster: false + delete_vgs: True + delete_vm_snapshots: True + register: result + +- name: check delete status + assert: + that: + - result.response is defined + - result.changed == True + - result.response.status == "5" + + fail_msg: "Unable to delete db server vm" + success_msg: "DB server VM deleted successfully" diff --git a/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/main.yml b/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/main.yml new file mode 100644 index 000000000..5216bd0e2 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false + block: + - import_tasks: "crud.yml" diff --git a/tests/integration/targets/ntnx_ndb_db_servers_info/meta/main.yml b/tests/integration/targets/ntnx_ndb_db_servers_info/meta/main.yml deleted file mode 100644 index 23b0fb268..000000000 --- a/tests/integration/targets/ntnx_ndb_db_servers_info/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - prepare_env \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_db_servers_info/tasks/info.yml b/tests/integration/targets/ntnx_ndb_db_servers_info/tasks/info.yml deleted file mode 100644 index 8aa65c2e2..000000000 --- a/tests/integration/targets/ntnx_ndb_db_servers_info/tasks/info.yml +++ /dev/null @@ -1,86 +0,0 @@ ---- - - -- debug: - msg: Start testing ntnx_ndb_db_servers_info - -- name: List era db_servers - ntnx_ndb_db_servers_info: - register: db_servers - - -- name: check listing status - assert: - that: - - db_servers.response is defined - - db_servers.failed == false - - db_servers.changed == false - - db_servers.response | length > 0 - fail_msg: "Unable to list all era db_servers" - success_msg: "era db_servers listed successfully" -################################################################ -- name: get era db_servers using it's name - ntnx_ndb_db_servers_info: - name: "{{db_servers.response[0].name}}" - register: result - -- name: check listing status - assert: - that: - - result.response is defined - - result.failed == false - - result.changed == false - - result.response.id == "{{db_servers.response[0].id}}" - - result.response.ipAddresses[0] == "{{db_servers.response[0].ipAddresses[0]}}" - fail_msg: "Unable to get era db_servers using it's name " - success_msg: "get era db_server using it's name finished successfully" -################################################################ -- name: get era db_servers using it's id - ntnx_ndb_db_servers_info: - uuid: "{{db_servers.response[0].id}}" - register: result - -- name: check listing status - assert: - that: - - 
result.response is defined - - result.failed == false - - result.changed == false - - result.response.name == "{{db_servers.response[0].name}}" - - result.response.ipAddresses[0] == "{{db_servers.response[0].ipAddresses[0]}}" - fail_msg: "Unable to get era db_servers using it's id " - success_msg: "get era db_server using it's id finished successfully" -################################################################ -- name: get era db_servers using ip - ntnx_ndb_db_servers_info: - server_ip: "{{db_servers.response[0].ipAddresses[0]}}" - register: result - -- name: check listing status - assert: - that: - - result.response is defined - - result.failed == false - - result.changed == false - - result.response.name == "{{db_servers.response[0].name}}" - - result.response.id == "{{db_servers.response[0].id}}" - fail_msg: "Unable to get era db_servers using it's ip " - success_msg: "get era db_server using it's ip finished successfully" - -################################################################ - -- name: get era db_servers with incorrect name - ntnx_ndb_db_servers_info: - name: "abcd" - register: result - no_log: true - ignore_errors: True - -- name: check listing status - assert: - that: - - result.error is defined - - result.failed == true - - result.changed == false - fail_msg: "module didn't errored out correctly when incorrect name is given" - success_msg: "module errored out correctly when incorrect name is given" \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_maintenance_windows/aliases b/tests/integration/targets/ntnx_ndb_maintenance_windows/aliases new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_maintenance_windows/aliases @@ -0,0 +1 @@ + diff --git a/tests/integration/targets/ntnx_ndb_maintenance_windows/meta/main.yml b/tests/integration/targets/ntnx_ndb_maintenance_windows/meta/main.yml new file mode 100644 index 000000000..ea2e9da19 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_maintenance_windows/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_maintenance_windows/readme.md b/tests/integration/targets/ntnx_ndb_maintenance_windows/readme.md new file mode 100644 index 000000000..8735ed118 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_maintenance_windows/readme.md @@ -0,0 +1,3 @@ +### Modules Tested: +1. ntnx_ndb_maintenance_window +2. 
ntnx_ndb_maintenance_windows_info diff --git a/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml new file mode 100644 index 000000000..15f1db254 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/crud.yml @@ -0,0 +1,347 @@ +--- + +- debug: + msg: "start ndb database maintenance window tests" + +- name: Generate random name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + +- set_fact: + window1_name: "{{random_name[0]}}1" + window2_name: "{{random_name[0]}}2" + +############################################## create tests #################################### +- name: create spec for maintenance window + check_mode: yes + ntnx_ndb_maintenance_window: + name: "{{window1_name}}" + desc: "ansible-created-window" + schedule: + recurrence: "weekly" + duration: 2 + timezone: "Asia/Calcutta" + start_time: "11:00:00" + day_of_week: "tuesday" + register: result + +- set_fact: + expected_result: { + "changed": false, + "error": null, + "failed": false, + "response": { + "description": "ansible-created-window", + "name": "{{window1_name}}", + "schedule": { + "dayOfWeek": "TUESDAY", + "duration": 2, + "recurrence": "WEEKLY", + "startTime": "11:00:00", + "weekOfMonth": null + }, + "timezone": "Asia/Calcutta" + }, + "uuid": null + } + +- name: Check mode status + assert: + that: + - result == expected_result + fail_msg: "Unable to create spec for creating window" + success_msg: "spec for maintenance window generated successfully" + + +- name: create window with weekly schedule + ntnx_ndb_maintenance_window: + name: "{{window1_name}}" + desc: "ansible-created-window" + schedule: + recurrence: "weekly" + duration: 2 + start_time: "11:00:00" + day_of_week: "tuesday" + timezone: "UTC" + register: result + +- set_fact: + window1_uuid: "{{result.uuid}}" + +- name: create status + assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.uuid is defined + - result.response.status == "ACTIVE" or result.response.status == "SCHEDULED" + - result.response.name == window1_name + - result.response.description == "ansible-created-window" + - result.response.schedule.dayOfWeek == "TUESDAY" + - result.response.schedule.recurrence == "WEEKLY" + - result.response.schedule.startTime == "11:00:00" + - result.response.schedule.timeZone == "UTC" + - result.response.schedule.weekOfMonth == None + - result.response.schedule.duration == 2 + + fail_msg: "Unable to create maintenance window with weekly schedule" + success_msg: "maintenance window with weekly schedule created successfully" + + +- name: create window with monthly schedule + ntnx_ndb_maintenance_window: + name: "{{window2_name}}" + desc: "ansible-created-window" + schedule: + recurrence: "monthly" + duration: 2 + start_time: "11:00:00" + day_of_week: "tuesday" + week_of_month: 2 + timezone: "UTC" + + register: result + +- set_fact: + window2_uuid: "{{result.uuid}}" + +- name: create status + assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.uuid is defined + - result.response.status == "ACTIVE" or result.response.status == "SCHEDULED" + - result.response.name == window2_name + - result.response.description == "ansible-created-window" + - result.response.schedule.dayOfWeek == "TUESDAY" + - result.response.schedule.recurrence == "MONTHLY" + - 
result.response.schedule.startTime == "11:00:00" + - result.response.schedule.timeZone == "UTC" + - result.response.schedule.weekOfMonth == 2 + - result.response.schedule.duration == 2 + + + fail_msg: "Unable to create maintenance window with monthly schedule" + success_msg: "maintenance window with monthly schedule created successfully" + +############################################## info module tests #################################### + +- name: Info module check + ntnx_ndb_maintenance_windows_info: + uuid: "{{window2_uuid}}" + + register: result + +- name: Info module status + assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response.name == window2_name + - result.response.id == window2_uuid + - result.uuid == window2_uuid + fail_msg: "Unable to fetch window info" + success_msg: "maintenance window info obtained successfully" + +- name: get all windows + ntnx_ndb_maintenance_windows_info: + register: result + +- name: Info module status + assert: + that: + - result.response is defined + - result.changed == False + - result.failed == False + - result.response | length > 1 + fail_msg: "Unable to fetch all windows" + success_msg: "all maintenance window info obtained successfully" + +############################################## update tests #################################### + + +- name: update window schedule + ntnx_ndb_maintenance_window: + uuid: "{{window2_uuid}}" + name: "{{window2_name}}-updated" + desc: "ansible-created-window-updated" + schedule: + recurrence: "monthly" + duration: 3 + start_time: "12:00:00" + timezone: "UTC" + day_of_week: "wednesday" + week_of_month: 3 + register: result + +- name: update status + assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.uuid is defined + - result.response.status == "ACTIVE" or result.response.status == "SCHEDULED" + - result.response.name == "{{window2_name}}-updated" + - result.response.description == "ansible-created-window-updated" + - result.response.schedule.dayOfWeek == "WEDNESDAY" + - result.response.schedule.recurrence == "MONTHLY" + - result.response.schedule.startTime == "12:00:00" + - result.response.schedule.timeZone == "UTC" + - result.response.schedule.weekOfMonth == 3 + - result.response.schedule.duration == 3 + + + fail_msg: "Unable to update maintenance window" + success_msg: "maintenance window updated successfully" + +- name: update schedule type + ntnx_ndb_maintenance_window: + uuid: "{{window2_uuid}}" + schedule: + recurrence: "weekly" + duration: 3 + start_time: "12:00:00" + day_of_week: "wednesday" + timezone: "UTC" + + register: result + +- name: update status + assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.uuid is defined + - result.response.status == "ACTIVE" or result.response.status == "SCHEDULED" + - result.response.name == "{{window2_name}}-updated" + - result.response.description == "ansible-created-window-updated" + - result.response.schedule.dayOfWeek == "WEDNESDAY" + - result.response.schedule.recurrence == "WEEKLY" + - result.response.schedule.startTime == "12:00:00" + - result.response.schedule.timeZone == "UTC" + - result.response.schedule.weekOfMonth == None + - result.response.schedule.duration == 3 + + + fail_msg: "Unable to update maintenance window" + success_msg: "maintenance window updated successfully" + +- name: idempotency checks + ntnx_ndb_maintenance_window: + uuid: "{{window2_uuid}}" + name: 
"{{window2_name}}-updated" + desc: "anisble-created-window-updated" + schedule: + recurrence: "weekly" + duration: 3 + start_time: "12:00:00" + day_of_week: "wednesday" + timezone: "UTC" + + register: result + +- name: check idempotency status + assert: + that: + - result.changed == false + - result.failed == false + - "'Nothing to change' in result.msg" + fail_msg: "window got updated" + success_msg: "window update got skipped due to no state changes" + +- name: updated day of week + ntnx_ndb_maintenance_window: + uuid: "{{window2_uuid}}" + schedule: + day_of_week: "saturday" + + register: result + + +- name: update status + assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.uuid is defined + - result.response.status == "ACTIVE" or result.response.status == "SCHEDULED" + - result.response.name == "{{window2_name}}-updated" + - result.response.schedule.dayOfWeek == "SATURDAY" + - result.response.schedule.recurrence == "WEEKLY" + - result.response.schedule.startTime == "12:00:00" + - result.response.schedule.timeZone == "UTC" + - result.response.schedule.weekOfMonth == None + - result.response.schedule.duration == 3 + + + fail_msg: "Unable to update maintenance window" + success_msg: "maintenance window updated successfully" + +- name: just update start time + ntnx_ndb_maintenance_window: + uuid: "{{window2_uuid}}" + schedule: + start_time: "11:00:00" + timezone: "Asia/Calcutta" + + register: result + +- name: update status + assert: + that: + - result.response is defined + - result.changed == True + - result.failed == False + - result.uuid is defined + - result.response.status == "ACTIVE" or result.response.status == "SCHEDULED" + - result.response.name == "{{window2_name}}-updated" + - result.response.schedule.dayOfWeek == "SATURDAY" + - result.response.schedule.recurrence == "WEEKLY" + - result.response.schedule.startTime == "05:30:00" + - result.response.schedule.timeZone == "UTC" + - result.response.schedule.weekOfMonth == None + - result.response.schedule.duration == 3 + + fail_msg: "Unable to update maintenance window" + success_msg: "maintenance window updated successfully" + + +############################################## delete tests #################################### + +- name: delete window 1 + ntnx_ndb_maintenance_window: + uuid: "{{window1_uuid}}" + state: "absent" + register: result + +- name: check delete status + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "success" + fail_msg: "unable to delete window" + success_msg: "window deleted successfully" + +- name: delete window 2 + ntnx_ndb_maintenance_window: + uuid: "{{window2_uuid}}" + state: "absent" + register: result + + +- name: check delete status + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "success" + fail_msg: "unable to delete window" + success_msg: "window deleted successfully" \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/main.yml b/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/main.yml new file mode 100644 index 000000000..abf2644d3 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_maintenance_windows/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: False + block: + - import_tasks: "crud.yml" diff --git 
a/tests/integration/targets/ntnx_ndb_profiles/aliases b/tests/integration/targets/ntnx_ndb_profiles/aliases new file mode 100644 index 000000000..139597f9c --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_profiles/aliases @@ -0,0 +1,2 @@ + + diff --git a/tests/integration/targets/ntnx_ndb_profiles/meta/main.yml b/tests/integration/targets/ntnx_ndb_profiles/meta/main.yml new file mode 100644 index 000000000..6397436fc --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_profiles/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml new file mode 100644 index 000000000..e8ef5a45b --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml @@ -0,0 +1,168 @@ +--- +- debug: + msg: Start testing ntnx_ndb_profiles + + +################################################################ +- name: Generate random profile_name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- set_fact: + suffix_name: "ansible-role-mapping" + +- set_fact: + profile1_name: "{{random_name}}{{suffix_name}}1" + profile2_name: "{{random_name}}{{suffix_name}}2" +################################################################ +- name: Verify creation of compute profile with defaults in check mode + ntnx_ndb_profiles: + name: "{{profile1_name}}" + desc: "testdesc" + type: compute + register: result + ignore_errors: true + check_mode: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{profile1_name}}" + - result.response.description == "testdesc" + - result.response.type == "Compute" + - result.response.properties[0].name=="CPUS" + - result.response.properties[1].name=="CORE_PER_CPU" + - result.response.properties[2].name=="MEMORY_SIZE" + - result.response.properties[0].value=="1" + - result.response.properties[1].value=="2" + - result.response.properties[2].value=="16" + fail_msg: "Fail: unable to verify creation of compute profile with defaults in check mode " + success_msg: "Pass: Verify creation of compute profile with defaults in check mode " +################################################################ +- name: verify creation of compute profile + ntnx_ndb_profiles: + name: "{{profile1_name}}" + desc: "testdesc" + type: compute + compute: + vcpus: 2 + cores_per_cpu: 4 + memory: 8 + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == true + - result.profile_uuid is defined + - result.response.name == "{{profile1_name}}" + - result.response.description == "testdesc" + - result.response.type == "Compute" + - result.response.versions[0].propertiesMap.CORE_PER_CPU == "4" + - result.response.versions[0].propertiesMap.CPUS == "2" + - result.response.versions[0].propertiesMap.MEMORY_SIZE == "8" + + fail_msg: "Fail: unable to create compute profile" + success_msg: "Pass: compute profile created successfully" + + +- set_fact: + todelete: "{{ todelete | default([]) + [ result.profile_uuid ] }}" +################################################################ +- name: verify update of params in compute profile and publish profile + ntnx_ndb_profiles: + name: "{{profile2_name}}" + desc: "newdesc" + profile_uuid: 
"{{result.profile_uuid}}" + type: compute + compute: + vcpus: 6 + cores_per_cpu: 4 + memory: 5 + publish: true + register: result + ignore_errors: true + + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == true + - result.profile_uuid is defined + - result.response.profile.name == "{{profile2_name}}" + - result.response.profile.description == "newdesc" + - result.response.profile.type == "Compute" + - result.response.version.propertiesMap.CORE_PER_CPU == "4" + - result.response.version.propertiesMap.CPUS == "6" + - result.response.version.propertiesMap.MEMORY_SIZE == "5" + - result.response.version.published == true + fail_msg: "Fail: unable to verify update of params in compute profile and publish profile" + success_msg: "Pass: verify update of params in compute profile and publish profile finished succesfully" +################################################################ +- name: verify idempotency check in compute profile + ntnx_ndb_profiles: + profile_uuid: "{{result.profile_uuid}}" + name: "{{profile2_name}}" + desc: "newdesc" + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.profile_uuid is defined + fail_msg: "Fail: unable to verify idempotency check in compute profile" + success_msg: "Pass: verify idempotency check in compute profile finished succesfully" +################################################################ +- name: verify unpublish flow in compute profile + ntnx_ndb_profiles: + profile_uuid: "{{result.profile_uuid}}" + + type: compute + compute: + publish: false + register: result + ignore_errors: true + + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == true + - result.profile_uuid is defined + - result.response.version.published == false + fail_msg: "Fail: unable to verify unpublish flow in compute profile " + success_msg: "Pass: verify unpublish flow in compute profile finished succesfully" +################################################################ +- name: Delete all created cmpute profiles + ntnx_ndb_profiles: + state: absent + profile_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: True + +- name: check listing status + assert: + that: + - result.changed is defined + - result.changed == true + - result.msg == "All items completed" + fail_msg: "unable to delete all created compute profiles" + success_msg: "All compute profiles deleted succesfully" + +- set_fact: + todelete: [] diff --git a/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml new file mode 100644 index 000000000..45c2622a6 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml @@ -0,0 +1,228 @@ +- name: Generate random profile_name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- set_fact: + suffix_name: "ansible-role-mapping" + max_connections: 50 + max_replication_slots: 5 + max_locks_per_transaction: 32 + effective_io_concurrency: 2 + timezone: UTC + max_prepared_transactions: 2 + max_wal_senders: 5 + min_wal_size: 9 + max_wal_size: 1 + wal_keep_segments: 500 + max_worker_processes: 4 + checkpoint_timeout: 55 + autovacuum: "off" + checkpoint_completion_target: 0.7 + 
autovacuum_freeze_max_age: 100000000 + autovacuum_vacuum_threshold: 40 + autovacuum_vacuum_scale_factor: 0.3 + autovacuum_work_mem: 1 + autovacuum_max_workers: 2 + autovacuum_vacuum_cost_delay: 22 + wal_buffers: 1 + synchronous_commit: local + random_page_cost: 3 + +- set_fact: + profile1_name: "{{random_name}}{{suffix_name}}1" + profile2_name: "{{random_name}}{{suffix_name}}2" + profile3_name: "{{random_name}}{{suffix_name}}3" + profile4_name: "{{random_name}}{{suffix_name}}4" +################################################################ +- name: Verify creation of db params profile + ntnx_ndb_profiles: + name: "{{profile1_name}}" + desc: "testdesc" + type: database_parameter + database_type: postgres + database_parameter: + postgres: + max_connections: "{{max_connections}}" + max_replication_slots: "{{max_replication_slots}}" + max_locks_per_transaction: "{{max_locks_per_transaction}}" + effective_io_concurrency: "{{effective_io_concurrency}}" + timezone: "{{timezone}}" + max_prepared_transactions: "{{max_prepared_transactions}}" + max_wal_senders: "{{max_wal_senders}}" + min_wal_size: "{{min_wal_size}}" + max_wal_size: "{{max_wal_size}}" + wal_keep_segments: "{{wal_keep_segments}}" + max_worker_processes: "{{max_worker_processes}}" + checkpoint_timeout: "{{checkpoint_timeout}}" + autovacuum: "{{autovacuum}}" + checkpoint_completion_target: "{{checkpoint_completion_target}}" + autovacuum_freeze_max_age: "{{autovacuum_freeze_max_age}}" + autovacuum_vacuum_threshold: "{{autovacuum_vacuum_threshold}}" + autovacuum_vacuum_scale_factor: "{{autovacuum_vacuum_scale_factor}}" + autovacuum_work_mem: "{{autovacuum_work_mem}}" + autovacuum_max_workers: "{{autovacuum_max_workers}}" + autovacuum_vacuum_cost_delay: "{{autovacuum_vacuum_cost_delay}}" + wal_buffers: "{{wal_buffers}}" + synchronous_commit: "{{synchronous_commit}}" + random_page_cost: "{{random_page_cost}}" + register: result + ignore_errors: true + + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == true + - result.response.name == "{{profile1_name}}" + - result.response.description == "testdesc" + - result.response.type == "Database_Parameter" + - result.response.versions[0].propertiesMap.autovacuum == "{{autovacuum}}" + - result.response.versions[0].propertiesMap.autovacuum_freeze_max_age == "{{autovacuum_freeze_max_age}}" + - result.response.versions[0].propertiesMap.autovacuum_max_workers == "{{autovacuum_max_workers}}" + - result.response.versions[0].propertiesMap.autovacuum_vacuum_cost_delay == "{{autovacuum_vacuum_cost_delay}}ms" + - result.response.versions[0].propertiesMap.autovacuum_vacuum_scale_factor == "{{autovacuum_vacuum_scale_factor}}" + - result.response.versions[0].propertiesMap.autovacuum_vacuum_threshold == "{{autovacuum_vacuum_threshold}}" + - result.response.versions[0].propertiesMap.autovacuum_work_mem == "{{autovacuum_work_mem}}" + - result.response.versions[0].propertiesMap.checkpoint_completion_target == "{{checkpoint_completion_target}}" + - result.response.versions[0].propertiesMap.checkpoint_timeout == "{{checkpoint_timeout}}min" + - result.response.versions[0].propertiesMap.effective_io_concurrency == "{{effective_io_concurrency}}" + - result.response.versions[0].propertiesMap.max_connections == "{{max_connections}}" + - result.response.versions[0].propertiesMap.max_locks_per_transaction == "{{max_locks_per_transaction}}" + - result.response.versions[0].propertiesMap.max_prepared_transactions == "{{max_prepared_transactions}}" 
+ - result.response.versions[0].propertiesMap.max_replication_slots == "{{max_replication_slots}}" + - result.response.versions[0].propertiesMap.max_wal_senders == "{{max_wal_senders}}" + - result.response.versions[0].propertiesMap.max_wal_size == "{{max_wal_size}}GB" + - result.response.versions[0].propertiesMap.max_worker_processes == "{{max_worker_processes}}" + - result.response.versions[0].propertiesMap.min_wal_size == "{{min_wal_size}}MB" + - result.response.versions[0].propertiesMap.random_page_cost == "{{random_page_cost}}" + - result.response.versions[0].propertiesMap.synchronous_commit == "{{synchronous_commit}}" + - result.response.versions[0].propertiesMap.timezone == "{{timezone}}" + - result.response.versions[0].propertiesMap.wal_buffers == "{{wal_buffers}}" + - result.response.versions[0].propertiesMap.wal_keep_segments == "{{wal_keep_segments}}" + fail_msg: "Fail: Unable to create db params profile " + success_msg: "Pass: Creation of db params profile finished successfully " + +- set_fact: + todelete: "{{ todelete + [ result.profile_uuid ] }}" +################################################################ +- name: verify update of params in database_parameter profile and publish profile + ntnx_ndb_profiles: + name: "{{profile2_name}}" + desc: "newdesc" + profile_uuid: "{{result.profile_uuid}}" + type: database_parameter + database_type: postgres + database_parameter: + publish: true + postgres: + max_connections: 1 + max_replication_slots: 2 + max_locks_per_transaction: 3 + effective_io_concurrency: 4 + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == true + - result.profile_uuid is defined + - result.response.profile.name == "{{profile2_name}}" + - result.response.profile.description == "newdesc" + - result.response.version.published == true + - result.response.profile.versions[0].propertiesMap.max_connections == "1" + - result.response.profile.versions[0].propertiesMap.max_replication_slots == "2" + - result.response.profile.versions[0].propertiesMap.max_locks_per_transaction == "3" + - result.response.profile.versions[0].propertiesMap.effective_io_concurrency == "4" + fail_msg: "Fail: unable to verify update of params in database_parameter profile and publish profile " + success_msg: "Pass: verify update of params in database_parameter profile and publish profile finished successfully" +################################################################ +- name: verify unpublish flow in database_parameter profile + ntnx_ndb_profiles: + profile_uuid: "{{result.profile_uuid}}" + database_parameter: + publish: false + type: database_parameter + database_type: postgres + register: result + ignore_errors: true + + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.profile_uuid is defined + - result.response.profile.versions[0].published == false + fail_msg: "Fail: unable to verify unpublish flow in database_parameter profile " + success_msg: "Pass: verify unpublish flow in database_parameter profile finished successfully " +################################################################ +- name: verify creation of db params profile with defaults + ntnx_ndb_profiles: + name: "{{profile3_name}}" + desc: "testdesc" + type: database_parameter + database_type: postgres + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed 
== false + - result.changed == true + - result.response.name == "{{profile3_name}}" + - result.response.description == "testdesc" + - result.response.type == "Database_Parameter" + - result.response.versions is defined + fail_msg: "Fail: Unable to verify creation of db params profile with defaults " + success_msg: "Pass: verify creation of db params profile with defaults finished successfully " + +- set_fact: + todelete: "{{ todelete + [ result.profile_uuid ] }}" +################################################################ +- name: verify idempotency check + ntnx_ndb_profiles: + name: "{{profile3_name}}" + desc: "testdesc" + profile_uuid: "{{result.profile_uuid}}" + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.profile_uuid is defined + - result.response.profile.name == "{{profile3_name}}" + - result.response.profile.description == "testdesc" + fail_msg: "Fail: Unable to verify idempotency check " + success_msg: "Pass: verify idempotency check finished successfully" +################################################################ +- name: Delete all created Database_Parameter profiles + ntnx_ndb_profiles: + state: absent + profile_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: True + +- name: check listing status + assert: + that: + - result.changed is defined + - result.changed == true + - result.msg == "All items completed" + fail_msg: "unable to delete all created Database_Parameter profiles" + success_msg: "All Database_Parameter profiles deleted successfully" + +- set_fact: + todelete: [] \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_profiles/tasks/main.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/main.yml new file mode 100644 index 000000000..43729cff4 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ndb_ip}}" + nutanix_username: "{{ndb_username}}" + nutanix_password: "{{ndb_password}}" + validate_certs: false + block: + - import_tasks: "compute.yml" + - import_tasks: "db_params.yml" + - import_tasks: "network_profile.yml" diff --git a/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml new file mode 100644 index 000000000..6926d39b1 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml @@ -0,0 +1,192 @@ +- name: Generate random profile_name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- set_fact: + suffix_name: "ansible-role-mapping" + +- set_fact: + todelete: [] + profile1_name: "{{random_name}}{{suffix_name}}1" + profile2_name: "{{random_name}}{{suffix_name}}2" + profile3_name: "{{random_name}}{{suffix_name}}3" +################################################################ +- name: verify create of single cluster network profile + ntnx_ndb_profiles: + name: "{{profile1_name}}" + desc: "testdesc" + type: network + database_type: postgres + network: + topology: single + vlans: + - + cluster: + name: "{{network_profile.single.cluster.name}}" + vlan_name: "{{network_profile.single.vlan_name}}" + enable_ip_address_selection: true + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - 
result.failed == false + - result.changed == true + - result.response.name == "{{profile1_name}}" + - result.response.description == "testdesc" + - result.response.type == "Network" + - result.response.topology == "single" + - result.response.versions[0].propertiesMap.ENABLE_IP_ADDRESS_SELECTION == "true" + - result.response.versions[0].propertiesMap.VLAN_NAME == "{{network_profile.single.vlan_name}}" + - result.response.versions[0].published == false + fail_msg: "Fail: unable to verify create of single cluster network profile" + success_msg: "Pass: verify create of single cluster network profile finished successfully " +################################################################ +- name: update the profile for single cluster by name, desc, publish + ntnx_ndb_profiles: + name: "{{profile2_name}}" + desc: "testdesc2" + network: + publish: true + profile_uuid: "{{result.profile_uuid}}" + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == true + - result.response.profile.name == "{{profile2_name}}" + - result.response.profile.description == "testdesc2" + - result.response.profile.type == "Network" + - result.response.profile.topology == "single" + - result.response.profile.versions[0].propertiesMap.ENABLE_IP_ADDRESS_SELECTION == "true" + - result.response.profile.versions[0].propertiesMap.VLAN_NAME == "{{network_profile.single.vlan_name}}" + - result.response.profile.versions[0].published == true + fail_msg: "Fail: unable to update the profile for single cluster by name, desc, publish " + success_msg: "Pass: update the profile for single cluster by name, desc, publish finished successfully " + +- set_fact: + todelete: "{{ todelete + [ result.profile_uuid ] }}" +################################################################ +- name: verify idempotency check + ntnx_ndb_profiles: + name: "{{profile2_name}}" + desc: "testdesc2" + profile_uuid: "{{result.profile_uuid}}" + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.profile_uuid is defined + fail_msg: "Fail: unable to verify idempotency check" + success_msg: "Pass: verify idempotency check finished successfully " +################################################################ +- name: verify create of multiple cluster network profile + ntnx_ndb_profiles: + name: "{{profile3_name}}" + desc: "testdesc" + type: network + database_type: postgres + network: + topology: cluster + vlans: + - + cluster: + name: "{{network_profile.HA.cluster1.name}}" + vlan_name: "{{network_profile.HA.cluster1.vlan_name}}" + - + cluster: + name: "{{network_profile.HA.cluster2.name}}" + vlan_name: "{{network_profile.HA.cluster2.vlan_name}}" + + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == true + - result.response.name == "{{profile3_name}}" + - result.response.description == "testdesc" + - result.response.type == "Network" + - result.response.topology == "cluster" + - result.response.versions[0].propertiesMap.ENABLE_IP_ADDRESS_SELECTION == "false" + - result.response.versions[0].propertiesMap.VLAN_NAME_0 == "{{network_profile.HA.cluster1.vlan_name}}" + - result.response.versions[0].propertiesMap.VLAN_NAME_1 == "{{network_profile.HA.cluster2.vlan_name}}" + - 
result.response.versions[0].propertiesMap.CLUSTER_NAME_0 == "{{network_profile.HA.cluster1.name}}" + - result.response.versions[0].propertiesMap.CLUSTER_NAME_1 == "{{network_profile.HA.cluster2.name}}" + fail_msg: "Fail: unable to verify create of multiple cluster network profile " + success_msg: "Pass: verify create of multiple cluster network profile finished successfully" + +- set_fact: + todelete: "{{ todelete + [ result.profile_uuid ] }}" +################################################################ +- name: update the profile for multiple cluster by subnets, publish + ntnx_ndb_profiles: + type: network + profile_uuid: "{{result.profile_uuid}}" + network: + publish: true + topology: cluster + vlans: + - + cluster: + name: "{{network_profile.HA.cluster1.name}}" + vlan_name: "{{network_profile.HA.cluster1.vlan_name2}}" + - + cluster: + name: "{{network_profile.HA.cluster2.name}}" + vlan_name: "{{network_profile.HA.cluster2.vlan_name2}}" + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == true + - result.response.profile.name == "{{profile3_name}}" + - result.response.profile.description == "testdesc" + - result.response.profile.type == "Network" + - result.response.profile.topology == "cluster" + - result.response.profile.versions[0].propertiesMap.ENABLE_IP_ADDRESS_SELECTION == "false" + - result.response.profile.versions[0].propertiesMap.VLAN_NAME_0 == "{{network_profile.HA.cluster1.vlan_name2}}" + - result.response.profile.versions[0].propertiesMap.VLAN_NAME_1 == "{{network_profile.HA.cluster2.vlan_name2}}" + - result.response.profile.versions[0].propertiesMap.CLUSTER_NAME_0 == "{{network_profile.HA.cluster1.name}}" + - result.response.profile.versions[0].propertiesMap.CLUSTER_NAME_1 == "{{network_profile.HA.cluster2.name}}" + - result.response.profile.versions[0].published == true + fail_msg: "Fail: unable to update the profile for multiple cluster by subnets, publish " + success_msg: "Pass: update the profile for multiple cluster by subnets, publish finished successfully" +################################################################ +- name: Delete all created network profiles + ntnx_ndb_profiles: + state: absent + profile_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: True + +- name: check listing status + assert: + that: + - result.changed is defined + - result.changed == true + - result.msg == "All items completed" + fail_msg: "unable to delete all created network profiles" + success_msg: "All network profiles deleted successfully" + +- set_fact: + todelete: [] \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_profiles_info/aliases b/tests/integration/targets/ntnx_ndb_profiles_info/aliases new file mode 100644 index 000000000..139597f9c --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_profiles_info/aliases @@ -0,0 +1,2 @@ + + diff --git a/tests/integration/targets/ntnx_ndb_profiles_info/meta/main.yml b/tests/integration/targets/ntnx_ndb_profiles_info/meta/main.yml index 23b0fb268..6397436fc 100644 --- a/tests/integration/targets/ntnx_ndb_profiles_info/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_profiles_info/meta/main.yml @@ -1,2 +1,2 @@ dependencies: - - prepare_env \ No newline at end of file + - prepare_ndb_env \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_profiles_info/tasks/info.yml b/tests/integration/targets/ntnx_ndb_profiles_info/tasks/info.yml 
index b47afc749..9691e884d 100644 --- a/tests/integration/targets/ntnx_ndb_profiles_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_ndb_profiles_info/tasks/info.yml @@ -5,6 +5,7 @@ - name: List profiles ntnx_ndb_profiles_info: register: profiles + ignore_errors: true - name: check listing status @@ -14,13 +15,32 @@ - profiles.failed == false - profiles.changed == false - profiles.response | length > 0 - fail_msg: "Unable to list all era profile" - success_msg: "era profiles listed successfully" + fail_msg: "Unable to list all NDB profiles" + success_msg: "NDB profiles listed successfully" +################################################################ +- name: List profiles with postgres database engine + ntnx_ndb_profiles_info: + filters: + engine: postgres_database + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response[0].engineType == "postgres_database" + fail_msg: "Unable to list all NDB profiles with postgres database engine" + success_msg: "NDB profiles with postgres database engine listed successfully" ################################################################ - name: List Database_Parameter profiles ntnx_ndb_profiles_info: - profile_type: Database_Parameter + filters: + type: Database_Parameter register: result + ignore_errors: true - name: check listing status assert: @@ -29,13 +49,15 @@ - result.failed == false - result.changed == false - result.response[0].type == "Database_Parameter" - fail_msg: "Unable to list all Database_Parameter era profile" - success_msg: "Database_Parameter era profiles listed successfully" + fail_msg: "Unable to list all Database_Parameter NDB profiles" + success_msg: "Database_Parameter NDB profiles listed successfully" ################################################################ - name: List Network profiles ntnx_ndb_profiles_info: - profile_type: Network + filters: + type: Network register: result + ignore_errors: true - name: check listing status assert: @@ -44,13 +66,15 @@ - result.failed == false - result.changed == false - result.response[0].type == "Network" - fail_msg: "Unable to list all Network era profile" - success_msg: "Network era profiles listed successfully" + fail_msg: "Unable to list all Network NDB profiles" + success_msg: "Network NDB profiles listed successfully" ################################################################ - name: List Compute profiles ntnx_ndb_profiles_info: - profile_type: Compute + filters: + type: Compute register: result + ignore_errors: true - name: check listing status assert: @@ -59,13 +83,15 @@ - result.failed == false - result.changed == false - result.response[0].type == "Compute" - fail_msg: "Unable to list all Compute era profile" - success_msg: "Compute era profiles listed successfully" + fail_msg: "Unable to list all Compute NDB profiles" + success_msg: "Compute NDB profiles listed successfully" ################################################################ - name: List Software profiles ntnx_ndb_profiles_info: - profile_type: Software + filters: + type: Software register: result + ignore_errors: true - name: check listing status assert: @@ -74,13 +100,14 @@ - result.failed == false - result.changed == false - result.response[0].type == "Software" - fail_msg: "Unable to list all Software era profile" - success_msg: "Software era profiles listed successfully" + fail_msg: "Unable to list all Software NDB profiles" + success_msg: "Software 
NDB profiles listed successfully" ################################################################ -- name: get era profile using era profile name +- name: get NDB profile using NDB profile name ntnx_ndb_profiles_info: name: "{{profiles.response[0].name}}" register: result + ignore_errors: true - name: check listing status assert: @@ -89,14 +116,15 @@ - result.failed == false - result.changed == false - result.response.id == "{{profiles.response[0].id}}" - fail_msg: "Unable to get era profile using era profile name" - success_msg: "get era profile using era profile name finished successfully" + fail_msg: "Unable to get NDB profile using NDB profile name" + success_msg: "get NDB profile using NDB profile name finished successfully" ################################################################ - name: List profiles ntnx_ndb_profiles_info: uuid: "{{profiles.response[0].id}}" latest_version: true register: result + ignore_errors: true - name: check listing status assert: @@ -105,12 +133,12 @@ - result.failed == false - result.changed == false - result.response.name == "{{profiles.response[0].name}}" - fail_msg: "Unable to get era profile using era profile id" - success_msg: "get era profile using era profile id finished successfully" + fail_msg: "Unable to get NDB profile using NDB profile id" + success_msg: "get NDB profile using NDB profile id finished successfully" ################################################################ -- name: get era profiles with incorrect name +- name: get NDB profiles with incorrect name ntnx_ndb_profiles_info: name: "abcd" register: result diff --git a/tests/integration/targets/ntnx_ndb_slas/aliases b/tests/integration/targets/ntnx_ndb_slas/aliases new file mode 100644 index 000000000..139597f9c --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_slas/aliases @@ -0,0 +1,2 @@ + + diff --git a/tests/integration/targets/ntnx_ndb_slas/meta/main.yml b/tests/integration/targets/ntnx_ndb_slas/meta/main.yml new file mode 100644 index 000000000..6397436fc --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_slas/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_slas/tasks/CRUD.yml b/tests/integration/targets/ntnx_ndb_slas/tasks/CRUD.yml new file mode 100644 index 000000000..217ce7c5e --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_slas/tasks/CRUD.yml @@ -0,0 +1,273 @@ +--- +- debug: + msg: Start testing ntnx_ndb_slas and ntnx_ndb_slas_info + +- name: Generate random profile_name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)[0]}}" + +- set_fact: + suffix_name: "ansible-role-mapping" + +- set_fact: + todelete: [] + profile1_name: "{{random_name}}{{suffix_name}}1" + profile2_name: "{{random_name}}{{suffix_name}}2" + frequency: + logs_retention: 4 + snapshots_retention: + daily: 5 + weekly: 6 + monthly: 7 + quarterly: 8 +################################################################ create flow ######################################### +- name: Verify creation of slas with check mode + ntnx_ndb_slas: + name: "{{profile1_name}}" + desc: "testdesc" + frequency: + logs_retention: "{{frequency.logs_retention}}" + snapshots_retention: + daily: "{{frequency.snapshots_retention.daily}}" + weekly: "{{frequency.snapshots_retention.weekly}}" + monthly: "{{frequency.snapshots_retention.monthly}}" + quarterly: "{{frequency.snapshots_retention.quarterly}}" + register: result + ignore_errors: true + check_mode: true 
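+# With check_mode the module is only expected to build and return the prospective SLA spec;
+# nothing is created on the NDB server, which is why the next assert expects changed == false.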
+ +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{profile1_name}}" + - result.response.description == "testdesc" + - result.response.continuousRetention == {{frequency.logs_retention}} + - result.response.dailyRetention == {{frequency.snapshots_retention.daily}} + - result.response.monthlyRetention == {{frequency.snapshots_retention.monthly}} + - result.response.quarterlyRetention == {{frequency.snapshots_retention.quarterly}} + - result.response.weeklyRetention == {{frequency.snapshots_retention.weekly}} + + fail_msg: "Fail: Verify creation of slas with check mode failed " + success_msg: "Pass: Verify creation of slas with check mode finished successfully " +################################################################ +- name: Verify creation of slas + ntnx_ndb_slas: + name: "{{profile1_name}}" + desc: "testdesc" + frequency: + logs_retention: "{{frequency.logs_retention}}" + snapshots_retention: + daily: "{{frequency.snapshots_retention.daily}}" + weekly: "{{frequency.snapshots_retention.weekly}}" + monthly: "{{frequency.snapshots_retention.monthly}}" + quarterly: "{{frequency.snapshots_retention.quarterly}}" + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == true + - result.response.name == "{{profile1_name}}" + - result.response.description == "testdesc" + - result.response.continuousRetention == {{frequency.logs_retention}} + - result.response.dailyRetention == {{frequency.snapshots_retention.daily}} + - result.response.monthlyRetention == {{frequency.snapshots_retention.monthly}} + - result.response.quarterlyRetention == {{frequency.snapshots_retention.quarterly}} + - result.response.weeklyRetention == {{frequency.snapshots_retention.weekly}} + - result.sla_uuid is defined + fail_msg: "Fail: Unable to create sla " + success_msg: "Pass: sla is created successfully " +- set_fact: + todelete: "{{ todelete + [ result.sla_uuid ] }}" +################################################################ +- set_fact: + frequency: + logs_retention: 10 + snapshots_retention: + daily: 11 + weekly: 12 + monthly: 13 + quarterly: 14 + +- name: verify slas update flow + ntnx_ndb_slas: + sla_uuid: "{{result.sla_uuid}}" + name: "{{profile2_name}}" + desc: "newdesc" + frequency: + logs_retention: "{{frequency.logs_retention}}" + snapshots_retention: + daily: "{{frequency.snapshots_retention.daily}}" + weekly: "{{frequency.snapshots_retention.weekly}}" + monthly: "{{frequency.snapshots_retention.monthly}}" + quarterly: "{{frequency.snapshots_retention.quarterly}}" + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == true + - result.response.name == "{{profile2_name}}" + - result.response.description == "newdesc" + - result.response.continuousRetention == {{frequency.logs_retention}} + - result.response.dailyRetention == {{frequency.snapshots_retention.daily}} + - result.response.monthlyRetention == {{frequency.snapshots_retention.monthly}} + - result.response.quarterlyRetention == {{frequency.snapshots_retention.quarterly}} + - result.response.weeklyRetention == {{frequency.snapshots_retention.weekly}} + - result.sla_uuid is defined + fail_msg: "Fail: Unable to update sla " + success_msg: "Pass: verify slas update flow finished successfully" 
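+# The update-flow tests below follow the same pattern as creation: a check-mode call that
+# only returns the would-be spec, then an idempotent call with identical values that
+# should report "Nothing to change."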
+################################################################ update flow ######################################### +- name: verify slas update flow with check mode + ntnx_ndb_slas: + sla_uuid: "{{result.sla_uuid}}" + name: "{{profile2_name}}" + desc: "newdesc" + frequency: + logs_retention: "{{frequency.logs_retention}}" + snapshots_retention: + daily: "{{frequency.snapshots_retention.daily}}" + weekly: "{{frequency.snapshots_retention.weekly}}" + monthly: "{{frequency.snapshots_retention.monthly}}" + quarterly: "{{frequency.snapshots_retention.quarterly}}" + register: result + ignore_errors: true + check_mode: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{profile2_name}}" + - result.response.description == "newdesc" + - result.response.continuousRetention == {{frequency.logs_retention}} + - result.response.dailyRetention == {{frequency.snapshots_retention.daily}} + - result.response.monthlyRetention == {{frequency.snapshots_retention.monthly}} + - result.response.quarterlyRetention == {{frequency.snapshots_retention.quarterly}} + - result.response.weeklyRetention == {{frequency.snapshots_retention.weekly}} + - result.sla_uuid is defined + fail_msg: "Fail: unable to verify slas update flow with check mode " + success_msg: "Pass: verify slas update flow with check mode finished successfully" +################################################################ +- name: verify idempotency + ntnx_ndb_slas: + sla_uuid: "{{result.sla_uuid}}" + name: "{{profile2_name}}" + desc: "newdesc" + frequency: + logs_retention: "{{frequency.logs_retention}}" + snapshots_retention: + daily: "{{frequency.snapshots_retention.daily}}" + weekly: "{{frequency.snapshots_retention.weekly}}" + monthly: "{{frequency.snapshots_retention.monthly}}" + quarterly: "{{frequency.snapshots_retention.quarterly}}" + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.msg == "Nothing to change." 
+ - result.failed == false + - result.changed == false + fail_msg: "Fail: unable to verify idempotency" + success_msg: "Pass: verify idempotency " + +######################################################################## Info module tests ################################################# + +- debug: + msg: Start testing ntnx_ndb_slas_info + +- name: List all era slas + ntnx_ndb_slas_info: + register: slas + +- name: check listing status + assert: + that: + - slas.response is defined + - slas.failed == false + - slas.changed == false + - slas.response | length > 0 + fail_msg: "Unable to list all era slas" + success_msg: "era slas listed successfully" +################################################################ +- name: get era slas using its name + ntnx_ndb_slas_info: + name: "{{slas.response[0].name}}" + register: result + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{slas.response[0].name}}" + fail_msg: "Unable to get era slas using its name " + success_msg: "got era slas using its name successfully" +################################################################ +- name: List slas using id + ntnx_ndb_slas_info: + uuid: "{{slas.response[0].id}}" + register: result + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{slas.response[0].name}}" + fail_msg: "Unable to get era slas using its id " + success_msg: "got era slas using its id successfully" +################################################################ + + +- name: get era slas with incorrect name + ntnx_ndb_slas_info: + name: "abcd" + register: result + no_log: true + ignore_errors: True + +- name: check listing status + assert: + that: + - result.error is defined + - result.failed == true + - result.changed == false + fail_msg: "module didn't error out correctly when incorrect name is given" + success_msg: "module errored out correctly when incorrect name is given" + +######################################################################## Delete flow ################################################# +- name: verify slas delete flow + ntnx_ndb_slas: + state: absent + sla_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: True + +- name: check listing status + assert: + that: + - result.changed is defined + - result.changed == true + - result.msg == "All items completed" + fail_msg: "unable to delete all created slas" + success_msg: "All slas deleted successfully" + +- set_fact: + todelete: [] diff --git a/tests/integration/targets/ntnx_ndb_slas_info/tasks/main.yml b/tests/integration/targets/ntnx_ndb_slas/tasks/main.yml similarity index 86% rename from tests/integration/targets/ntnx_ndb_slas_info/tasks/main.yml rename to tests/integration/targets/ntnx_ndb_slas/tasks/main.yml index da502fcc5..cbd87d175 100644 --- a/tests/integration/targets/ntnx_ndb_slas_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_slas/tasks/main.yml @@ -6,4 +6,4 @@ nutanix_password: "{{ndb_password}}" validate_certs: false block: - - import_tasks: "info.yml" + - import_tasks: "CRUD.yml" diff --git a/tests/integration/targets/ntnx_ndb_slas_info/meta/main.yml b/tests/integration/targets/ntnx_ndb_slas_info/meta/main.yml deleted file mode 100644 index 23b0fb268..000000000 --- a/tests/integration/targets/ntnx_ndb_slas_info/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - 
prepare_env \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_slas_info/tasks/info.yml b/tests/integration/targets/ntnx_ndb_slas_info/tasks/info.yml deleted file mode 100644 index 2acec16a5..000000000 --- a/tests/integration/targets/ntnx_ndb_slas_info/tasks/info.yml +++ /dev/null @@ -1,65 +0,0 @@ ---- -- debug: - msg: Start testing ntnx_ndb_slas_info - -- name: List all era slas - ntnx_ndb_slas_info: - register: slas - -- name: check listing status - assert: - that: - - slas.response is defined - - slas.failed == false - - slas.changed == false - - slas.response | length > 0 - fail_msg: "Unable to list all era slas" - success_msg: "era slas listed successfully" -################################################################ -- name: get era slas using it's name - ntnx_ndb_slas_info: - name: "{{slas.response[0].name}}" - register: result - -- name: check listing status - assert: - that: - - result.response is defined - - result.failed == false - - result.changed == false - - result.response.name == "{{slas.response[0].name}}" - fail_msg: "Unable to get era slas using it's name " - success_msg: "get era slas using it's name successfully" -################################################################ -- name: List slas use id - ntnx_ndb_slas_info: - uuid: "{{slas.response[0].id}}" - register: result - -- name: check listing status - assert: - that: - - result.response is defined - - result.failed == false - - result.changed == false - - result.response.name == "{{slas.response[0].name}}" - fail_msg: "Unable to get era slas using it's id " - success_msg: "get era slas using it's id successfully" -################################################################ - - -- name: get era slas with incorrect name - ntnx_ndb_slas_info: - name: "abcd" - register: result - no_log: true - ignore_errors: True - -- name: check listing status - assert: - that: - - result.error is defined - - result.failed == true - - result.changed == false - fail_msg: "module didn't errored out correctly when incorrect name is given" - success_msg: "module errored out correctly when incorrect name is given" \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_snapshots_info/aliases b/tests/integration/targets/ntnx_ndb_snapshots_info/aliases new file mode 100644 index 000000000..139597f9c --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_snapshots_info/aliases @@ -0,0 +1,2 @@ + + diff --git a/tests/integration/targets/ntnx_ndb_snapshots_info/meta/main.yml b/tests/integration/targets/ntnx_ndb_snapshots_info/meta/main.yml new file mode 100644 index 000000000..6397436fc --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_snapshots_info/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_snapshots_info/tasks/info.yml b/tests/integration/targets/ntnx_ndb_snapshots_info/tasks/info.yml new file mode 100644 index 000000000..b0d7780f1 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_snapshots_info/tasks/info.yml @@ -0,0 +1,103 @@ +--- +- debug: + msg: Start testing ntnx_ndb_snapshots_info + +- name: List all NDB snapshots + ntnx_ndb_snapshots_info: + register: snapshots + +- name: check listing status + assert: + that: + - snapshots.response is defined + - snapshots.failed == false + - snapshots.changed == false + - snapshots.response | length > 0 + fail_msg: "Unable to list all NDB snapshots" + success_msg: "NDB snapshots listed successfully" 
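+# The tasks below exercise the info module's server-side filters (time_zone and
+# time-machine based value lookups) before fetching individual snapshots by uuid.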
+################################################################ +- name: get NDB snapshots with UTC time_zone + ntnx_ndb_snapshots_info: + filters: + time_zone: UTC + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response[0].timeZone == "UTC" + fail_msg: "Unable to get NDB snapshots with UTC time_zone " + success_msg: "got NDB snapshots with UTC time_zone successfully" + +################################################################ +- name: get NDB snapshots with time-machine id + ntnx_ndb_snapshots_info: + filters: + value: "{{snapshots.response[0].timeMachineId}}" + value_type: time-machine + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response[0].timeMachineId == "{{snapshots.response[0].timeMachineId}}" + fail_msg: "Unable to get NDB snapshots with time-machine id " + success_msg: "got NDB snapshots with time-machine id successfully" +################################################################ +- name: get NDB snapshots using its uuid + ntnx_ndb_snapshots_info: + uuid: "{{snapshots.response[0].id}}" + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.id == "{{snapshots.response[0].id}}" + fail_msg: "Unable to get NDB snapshots using its uuid " + success_msg: "got NDB snapshots using its uuid successfully" +# ################################################################ +- name: List snapshots using uuid and get snapshot files + ntnx_ndb_snapshots_info: + uuid: "{{snapshots.response[0].id}}" + get_files: true + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response[0].fileList is defined + - result.snapshot_uuid == "{{snapshots.response[0].id}}" + fail_msg: "Unable to get NDB snapshot files using its id " + success_msg: "got NDB snapshot files using its id successfully" +# ################################################################ +- name: get NDB snapshots with incorrect uuid + ntnx_ndb_snapshots_info: + uuid: "abcd" + register: result + no_log: true + ignore_errors: True + +- name: check listing status + assert: + that: + - result.error is defined + - result.failed == true + - result.changed == false + fail_msg: "Fail: module didn't error out correctly when incorrect uuid is given" + success_msg: "Pass: module errored out correctly when incorrect uuid is given" diff --git a/tests/integration/targets/ntnx_ndb_clusters_info/tasks/main.yml b/tests/integration/targets/ntnx_ndb_snapshots_info/tasks/main.yml similarity index 100% rename from tests/integration/targets/ntnx_ndb_clusters_info/tasks/main.yml rename to tests/integration/targets/ntnx_ndb_snapshots_info/tasks/main.yml diff --git a/tests/integration/targets/ntnx_ndb_software_profiles/aliases b/tests/integration/targets/ntnx_ndb_software_profiles/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_ndb_software_profiles/meta/main.yml b/tests/integration/targets/ntnx_ndb_software_profiles/meta/main.yml new file mode 100644 index 000000000..ea2e9da19 --- /dev/null +++ 
b/tests/integration/targets/ntnx_ndb_software_profiles/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml new file mode 100644 index 000000000..14979797f --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml @@ -0,0 +1,559 @@ +--- +# Summary: +# This playbook will test the following cases: +# 1. Creation of software profile +# 2. Update of software profile +# 3. Create, update and delete version +# 4. Publish, unpublish and deprecate profile +# 5. Replicate profiles to multiple clusters +# 6. Deletion of profile + +- debug: + msg: "start ndb software profile tests" + +- name: Generate random name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + +- set_fact: + profile1_name: "{{random_name[0]}}" + profile1_name_updated: "{{random_name[0]}}-updated" + profile2_name: "{{random_name[0]}}2" + + + +- name: create software profile create spec + check_mode: yes + ntnx_ndb_profiles: + name: "{{profile1_name}}" + desc: "{{profile1_name}}-desc" + type: "software" + database_type: "postgres" + software: + topology: "cluster" + name: "v1.0" + desc: "v1.0-desc" + notes: + os: "os_notes" + db_software: "db_notes" + db_server_vm: + name: "{{db_server_vm.name}}" + clusters: + - name: "{{cluster.cluster1.name}}" + - uuid: "{{cluster.cluster2.uuid}}" + register: result + + + +- set_fact: + expected_result: { + "changed": false, + "error": null, + "failed": false, + "profile_uuid": null, + "response": { + "availableClusterIds": [ + "{{cluster.cluster1.uuid}}", + "{{cluster.cluster2.uuid}}" + ], + "description": "{{profile1_name}}-desc", + "engineType": "postgres_database", + "name": "{{profile1_name}}", + "properties": [ + { + "name": "BASE_PROFILE_VERSION_NAME", + "value": "v1.0" + }, + { + "name": "BASE_PROFILE_VERSION_DESCRIPTION", + "value": "v1.0-desc" + }, + { + "name": "OS_NOTES", + "value": "os_notes" + }, + { + "name": "DB_SOFTWARE_NOTES", + "value": "db_notes" + }, + { + "name": "SOURCE_DBSERVER_ID", + "value": "{{db_server_vm.uuid}}" + } + ], + "systemProfile": false, + "topology": "cluster", + "type": "Software" + } + } + +- name: check spec for creating software profile + assert: + that: + - result == expected_result + + fail_msg: "Fail: Unable to create spec for software profile creation" + success_msg: "Pass: Spec for creating software profile generated successfully" + +- name: create software profile with base version and cluster instance topology, 
replicated to multiple clusters + ntnx_ndb_profiles: + name: "{{profile1_name}}-replicated" + desc: "{{profile1_name}}-desc-replicated" + type: "software" + database_type: "postgres" + software: + topology: "cluster" + name: "v1.0" + desc: "v1.0-desc" + notes: + os: "os_notes" + db_software: "db_notes" + db_server_vm: + uuid: "{{db_server_vm.uuid}}" + clusters: + - name: "{{cluster.cluster1.name}}" + - uuid: "{{cluster.cluster2.uuid}}" + register: result + + + +- set_fact: + clusters: ["{{cluster.cluster1.uuid}}", "{{cluster.cluster2.uuid}}"] + +- name: check status of creation + assert: + that: + - result.changed == True + - result.failed == False + - result.response is defined + - result.profile_uuid is defined + - result.response.name == "{{profile1_name}}-replicated" + - result.response.description == "{{profile1_name}}-desc-replicated" + - result.response.clusterAvailability[0].nxClusterId in clusters + - result.response.clusterAvailability[1].nxClusterId in clusters + - result.response.engineType == "postgres_database" + - result.response.status == "READY" + - result.response.topology == "cluster" + - result.response.type == "Software" + - result.response.versions[0].name == "v1.0" + - result.response.versions[0].description == "v1.0-desc" + - result.response.versions[0].propertiesMap["SOURCE_DBSERVER_ID"] == "{{db_server_vm.uuid}}" + + fail_msg: "Fail: Unable to create software profile with base version and cluster instance topology, replicated to multiple clusters." + success_msg: "Pass: Software profile with base version and cluster instance topology, replicated to multiple clusters, created successfully" + + +- name: create software profile with base version and single instance topology + ntnx_ndb_profiles: + name: "{{profile2_name}}" + desc: "{{profile2_name}}-desc" + type: "software" + database_type: "postgres" + software: + topology: "single" + name: "v1.0" + desc: "v1.0-desc" + notes: + os: "os_notes" + db_software: "db_notes" + db_server_vm: + uuid: "{{db_server_vm.uuid}}" + clusters: + - name: "{{cluster.cluster1.name}}" + register: result + + + +- name: check status of creation + assert: + that: + - result.changed == True + - result.failed == False + - result.response is defined + - result.profile_uuid is defined + - result.response.name == "{{profile2_name}}" + - result.response.description == "{{profile2_name}}-desc" + - result.response.clusterAvailability[0].nxClusterId == "{{cluster.cluster1.uuid}}" + - result.response.engineType == "postgres_database" + - result.response.status == "READY" + - result.response.topology == "single" + - result.response.type == "Software" + - result.response.versions[0].name == "v1.0" + - result.response.versions[0].description == "v1.0-desc" + - result.response.versions[0].propertiesMap["SOURCE_DBSERVER_ID"] == "{{db_server_vm.uuid}}" + + fail_msg: "Fail: Unable to create software profile with base version and single instance topology" + success_msg: "Pass: Software profile with base version and single instance topology created successfully" + + +- set_fact: + profile_uuid: "{{result.profile_uuid}}" + +- name: update software profile + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + name: "{{profile1_name}}-updated1" + desc: "{{profile1_name}}-desc-updated" + register: result + + + +- name: check status of update + assert: + that: + - result.changed == True + - result.failed == False + - result.response is defined + - result.profile_uuid is defined + - result.response.profile is defined + - result.response.profile.name == 
"{{profile1_name}}-updated1" + - result.response.profile.description == "{{profile1_name}}-desc-updated" + + fail_msg: "Fail: Unable to update software profile" + success_msg: "Pass: Software profile updated successfully" + + +- name: idempotency checks + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + name: "{{profile1_name}}-updated1" + desc: "{{profile1_name}}-desc-updated" + register: result + + + +- name: check status of creation + assert: + that: + - result.changed == False + - result.failed == False + - result.response is defined + - result.profile_uuid is defined + - result.response.profile is defined + - result.response.profile.name == "{{profile1_name}}-updated1" + - result.response.profile.description == "{{profile1_name}}-desc-updated" + + fail_msg: "Fail: Update didnt get skipped due to no state changes" + success_msg: "Pass: Update skipped successfully due to no state changes" + +- name: create software profile version spec + check_mode: yes + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + database_type: "postgres" + software: + name: "v2.0" + desc: "v2.0-desc" + notes: + os: "os_notes for v2" + db_software: "db_notes for v2" + db_server_vm: + name: "{{db_server_vm.name}}" + + register: result + +- set_fact: + expected_result: { + "changed": false, + "error": null, + "failed": false, + "profile_type": "software", + "profile_uuid": "{{profile_uuid}}", + "response": { + "profile": { + "description": "{{profile1_name}}-desc-updated", + "engineType": "postgres_database", + "name": "{{profile1_name}}-updated1" + }, + "version": { + "description": "v2.0-desc", + "engineType": "postgres_database", + "name": "v2.0", + "properties": [ + { + "name": "OS_NOTES", + "value": "os_notes for v2" + }, + { + "name": "DB_SOFTWARE_NOTES", + "value": "db_notes for v2" + }, + { + "name": "SOURCE_DBSERVER_ID", + "value": "{{db_server_vm.uuid}}" + } + ], + "systemProfile": false, + "topology": null, + "type": "Software" + } + } + } + +- name: check spec for creating spec for software profile version + assert: + that: + - result == expected_result + + fail_msg: "Fail: Unable to create spec for software profile version create" + success_msg: "Pass: Spec for creating software profile version generated successfully" + + +- name: create software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + database_type: "postgres" + software: + name: "v2.0" + desc: "v2.0-desc" + notes: + os: "os_notes for v2" + db_software: "db_notes for v2" + db_server_vm: + uuid: "{{db_server_vm.uuid}}" + + register: result + + + +- name: check status of version create + assert: + that: + - result.changed == True + - result.failed == False + - result.response is defined + - result.profile_uuid is defined + - result.response.profile.status == "READY" + - result.response.profile.versions | length == 2 + - result.response.version.type == "Software" + - result.response.version.name == "v2.0" + - result.response.version.description == "v2.0-desc" + - result.response.version.propertiesMap["SOURCE_DBSERVER_ID"] == "{{db_server_vm.uuid}}" + - result.response.version.published == false + + fail_msg: "Fail: Unable to create software profile version" + success_msg: "Pass: Software profile version created successfully" + +- set_fact: + version_uuid: "{{result.version_uuid}}" + +- name: create spec for update software profile version + check_mode: yes + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + database_type: "postgres" + software: + version_uuid: "{{result.version_uuid}}" + name: 
"v2.0-updated" + desc: "v2.0-desc-updated" + + register: result + + + +- name: check status of spec + assert: + that: + - result.changed == False + - result.failed == False + - result.response is defined + - result.profile_uuid is defined + - result.version_uuid == version_uuid + - result.response.version.name == "v2.0-updated" + - result.response.version.description == "v2.0-desc-updated" + - result.response.version.published == false + + fail_msg: "Fail: Unable to create spec for updating software profile version" + success_msg: "Pass: Spec for updating software profile version created successfully" + + +- name: update software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + database_type: "postgres" + software: + version_uuid: "{{result.version_uuid}}" + name: "v2.0-updated" + desc: "v2.0-desc-updated" + + register: result + + + +- name: check status of update + assert: + that: + - result.changed == True + - result.failed == False + - result.response is defined + - result.profile_uuid is defined + - result.version_uuid == version_uuid + - result.response.profile.status == "READY" + - result.response.profile.versions | length == 2 + - result.response.version.type == "Software" + - result.response.version.id == version_uuid + - result.response.version.name == "v2.0-updated" + - result.response.version.description == "v2.0-desc-updated" + - result.response.version.status == "READY" + - result.response.version.propertiesMap["SOURCE_DBSERVER_ID"] == "{{db_server_vm.uuid}}" + - result.response.version.published == false + + fail_msg: "Fail: Unable to update software profile version" + success_msg: "Pass: Software profile version updated successfully" + + +- set_fact: + version_uuid: "{{result.version_uuid}}" + +- name: publish software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + software: + version_uuid: "{{version_uuid}}" + publish: True + register: result + + + +- name: check status of update + assert: + that: + - result.changed == True + - result.failed == False + - result.response is defined + - result.profile_uuid is defined + - result.version_uuid == version_uuid + - result.response.profile.status == "READY" + - result.response.version.status == "READY" + - result.response.version.published == true + - result.response.version.deprecated == false + + fail_msg: "Fail: Unable to publish software profile version" + success_msg: "Pass: Software profile version published successfully" + +- name: unpublish software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + software: + version_uuid: "{{version_uuid}}" + publish: false + register: result + +- name: check status of update + assert: + that: + - result.changed == True + - result.failed == False + - result.response is defined + - result.profile_uuid is defined + - result.version_uuid == version_uuid + - result.response.profile.status == "READY" + - result.response.version.status == "READY" + - result.response.version.published == false + - result.response.version.deprecated == false + + fail_msg: "Fail: Unable to unpublish software profile version" + success_msg: "Pass: Software version unpublished successfully" + + +- name: deprecate software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + software: + version_uuid: "{{version_uuid}}" + deprecate: True + register: result + + + +- name: check status of update + assert: + that: + - result.changed == True + - result.failed == False + - result.response is defined + - result.profile_uuid is 
defined + - result.version_uuid == version_uuid + - result.response.profile.status == "READY" + - result.response.version.status == "READY" + - result.response.version.published == false + - result.response.version.deprecated == true + + fail_msg: "Fail: Unable to deprecate software profile version" + success_msg: "Pass: Software version deprecated successfully" + + + +- name: delete software profile version + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + software: + version_uuid: "{{version_uuid}}" + state: "absent" + register: result + + +- name: check status of update + assert: + that: + - result.changed == True + - result.failed == False + - result.response is defined + - result.profile_uuid is defined + - result.response.profile.status == "READY" + - result.response.version == "Profile Version Successfully Deleted." + + fail_msg: "Fail: Unable to delete software profile version" + success_msg: "Pass: Software version deleted successfully" + + +- name: replicate software profile + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + clusters: + - name: "{{cluster.cluster2.name}}" + register: result + +- name: wait for 3 minutes for replication to finish from source cluster to cluster2 + ansible.builtin.pause: + minutes: 3 + + +- set_fact: + clusters: {} + +- name: create clusters status map + set_fact: + clusters: "{{ clusters | default({}) | combine ({ item['nxClusterId'] : item['status'] }) }}" + loop: "{{result.response.profile.clusterAvailability}}" + no_log: True +- name: check status of replication + assert: + that: + - result.changed == True + - result.failed == False + - result.response is defined + - result.profile_uuid is defined + - clusters["{{cluster.cluster1.uuid}}"] == "INACTIVE" + - clusters["{{cluster.cluster2.uuid}}"] == "ACTIVE" + - result.response.profile.status == "READY" + + fail_msg: "Fail: Unable to replicate software profile" + success_msg: "Pass: Software profile replicated successfully" + +- name: delete software profile + ntnx_ndb_profiles: + profile_uuid: "{{profile_uuid}}" + state: "absent" + register: result + + +- name: check status of delete + assert: + that: + - result.changed == True + - result.failed == False + - result.response == "Profile Successfully Deleted." 
+ + fail_msg: "Fail: Unable to delete software profile" + success_msg: "Pass: Software profile delete successfully" diff --git a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/main.yml b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/main.yml new file mode 100644 index 000000000..b25157ea7 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false + + block: + - import_tasks: "crud.yml" diff --git a/tests/integration/targets/ntnx_ndb_tags/aliases b/tests/integration/targets/ntnx_ndb_tags/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_ndb_tags/meta/main.yml b/tests/integration/targets/ntnx_ndb_tags/meta/main.yml new file mode 100644 index 000000000..ea2e9da19 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_tags/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_tags/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_tags/tasks/crud.yml new file mode 100644 index 000000000..de2320e20 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_tags/tasks/crud.yml @@ -0,0 +1,191 @@ +--- + +- debug: + msg: "start ntnx_ndb_tags" + +- name: Generate random name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + +- set_fact: + tag_name: "{{random_name[0]}}" + tag_name_updated: "{{random_name[0]}}-updated" + +############################# Create & Delete tests ######################## + +- name: check mode for creation + check_mode: yes + ntnx_ndb_tags: + name: "{{tag_name}}-timemachine" + desc: tag-created-by-ansible + tag_value_required: True + entity_type: TIME_MACHINE + register: result + +- set_fact: + expected_response: { + "description": "tag-created-by-ansible", + "entityType": "TIME_MACHINE", + "name": "{{tag_name}}-timemachine", + "required": true + } + +- name: Creation Status + assert: + that: + - result.response is defined + - result.changed == false + - result.response == expected_response + + fail_msg: "Unable to create spec for tag" + success_msg: "Spec generated succefully for tag creation" + + +- name: create tags for clone + ntnx_ndb_tags: + name: "{{tag_name}}-clone" + desc: tag-created-by-ansible + tag_value_required: True + entity_type: CLONE + register: result + +- name: Check create status + assert: + that: + - result.response is defined + - result.changed == true + - result.uuid is defined + - result.response.name == "{{tag_name}}-clone" + - result.response.entityType == "CLONE" + - result.response.status == "ENABLED" + - result.response.description == "tag-created-by-ansible" + - result.response.required == true + fail_msg: "Tag for clone create failed" + success_msg: "Tag for clone created succefully" + + + +- name: delete the tag + ntnx_ndb_tags: + state: "absent" + uuid: "{{result.uuid}}" + register: result + +- name: Check delete status + assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + fail_msg: "Unable to delete tag" + success_msg: "tag deleted succefully" + + +- name: create tags for databases + ntnx_ndb_tags: + name: "{{tag_name}}-database" + desc: tag-created-by-ansible + tag_value_required: False + entity_type: DATABASE + register: result + + +- name: check create status + 
assert: + that: + - result.response is defined + - result.changed == true + - result.uuid is defined + - result.response.name == "{{tag_name}}-database" + - result.response.required == false + - result.response.entityType == "DATABASE" + - result.response.status == "ENABLED" + fail_msg: "Tag create for databases failed" + success_msg: "Tag created successfully" + + +- set_fact: + tag_uuid: "{{result.uuid}}" + +################################## Update tests ######################## + +- name: update tag + ntnx_ndb_tags: + uuid: "{{tag_uuid}}" + name: "{{tag_name_updated}}" + desc: tag-created-by-ansible-updated + tag_value_required: true + status: "DEPRECATED" + register: result + +- name: Check update status + assert: + that: + - result.response is defined + - result.changed == true + - result.uuid is defined + - result.response.name == "{{tag_name_updated}}" + - result.response.required == true + - result.response.entityType == "DATABASE" + - result.response.status == "DEPRECATED" + fail_msg: "tag update failed" + success_msg: "tag updated successfully" + + + +- name: idempotency checks + ntnx_ndb_tags: + uuid: "{{tag_uuid}}" + name: "{{tag_name_updated}}" + desc: tag-created-by-ansible-updated + tag_value_required: true + status: "DEPRECATED" + register: result + + + +- name: check idempotency status + assert: + that: + - result.changed == false + - result.failed == false + - "'Nothing to change' in result.msg" + fail_msg: "tag got updated" + success_msg: "tag update skipped due to no state changes" + + +- name: enable tag + ntnx_ndb_tags: + uuid: "{{tag_uuid}}" + tag_value_required: true + status: "ENABLED" + register: result + + + +- name: check status changes + assert: + that: + - result.response is defined + - result.changed == true + - result.uuid == "{{tag_uuid}}" + - result.response.status == "ENABLED" + fail_msg: "Enabling tag failed" + success_msg: "Tag enabled successfully" + + +- name: delete the tag + ntnx_ndb_tags: + state: "absent" + uuid: "{{tag_uuid}}" + register: result + + +- name: Check delete status + assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + fail_msg: "Unable to delete tag" + success_msg: "tag deleted successfully" diff --git a/tests/integration/targets/ntnx_ndb_tags/tasks/main.yml b/tests/integration/targets/ntnx_ndb_tags/tasks/main.yml new file mode 100644 index 000000000..5216bd0e2 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_tags/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false + block: + - import_tasks: "crud.yml" diff --git a/tests/integration/targets/ntnx_ndb_time_machines_info/aliases b/tests/integration/targets/ntnx_ndb_time_machines_info/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/ntnx_ndb_time_machines_info/meta/main.yml b/tests/integration/targets/ntnx_ndb_time_machines_info/meta/main.yml index 23b0fb268..6397436fc 100644 --- a/tests/integration/targets/ntnx_ndb_time_machines_info/meta/main.yml +++ b/tests/integration/targets/ntnx_ndb_time_machines_info/meta/main.yml @@ -1,2 +1,2 @@ dependencies: - - prepare_env \ No newline at end of file + - prepare_ndb_env \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_time_machines_info/tasks/info.yml b/tests/integration/targets/ntnx_ndb_time_machines_info/tasks/info.yml index 4fa4b2fb6..bed307a82 100644 --- 
a/tests/integration/targets/ntnx_ndb_time_machines_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_ndb_time_machines_info/tasks/info.yml @@ -2,7 +2,7 @@ - debug: msg: Start testing ntnx_ndb_time_machines_info -- name: List all era tms +- name: List all NDB tms ntnx_ndb_time_machines_info: register: tms @@ -13,10 +13,28 @@ - tms.failed == false - tms.changed == false - tms.response | length > 0 - fail_msg: "Unable to list all era tms" - success_msg: "era tms listed successfully" + fail_msg: "Unable to list all NDB tms" + success_msg: "NDB tms listed successfully" ################################################################ -- name: get era tms using it's name +- name: List all NDB tms using filter + ntnx_ndb_time_machines_info: + filters: + value_type: name + value: "{{tms.response[0].name}}" + register: result + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response | length > 0 + - result.response[0].id == "{{tms.response[0].id}}" + fail_msg: "Unable to list all NDB tms using filter" + success_msg: "NDB tms listed successfully using filter" +################################################################ +- name: get NDB tms using its name + ntnx_ndb_time_machines_info: + name: "{{tms.response[0].name}}" + register: result @@ -29,8 +47,8 @@ - result.failed == false - result.changed == false - result.response.id == "{{tms.response[0].id}}" - fail_msg: "Unable to get era tms using it's name " - success_msg: "get era tms using it's name successfully" + fail_msg: "Unable to get NDB tms using its name" + success_msg: "get NDB tms using its name successfully" ################################################################ - name: List tms use id ntnx_ndb_time_machines_info: @@ -44,11 +62,32 @@ - result.failed == false - result.changed == false - result.response.name == "{{tms.response[0].name}}" - fail_msg: "Unable to get era tms using it's id " - success_msg: "get era tms using it's id successfully" + fail_msg: "Unable to get NDB tms using its id" + success_msg: "get NDB tms using its id successfully" + +################################################################ + +- name: List tms using id and load database as well + ntnx_ndb_time_machines_info: + uuid: "{{tms.response[0].id}}" + filters: + load_database: true + register: result + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{tms.response[0].name}}" + - result.response.database is defined + fail_msg: "Unable to get NDB tms using its id" + success_msg: "get NDB tms using its id successfully" + ################################################################ -- name: get era timemachine with incorrect name +- name: get NDB timemachine with incorrect name ntnx_ndb_time_machines_info: name: "abcd" register: result diff --git a/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/aliases b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/aliases new file mode 100644 index 000000000..139597f9c --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/aliases @@ -0,0 +1,2 @@ + + diff --git a/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/meta/main.yml b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/meta/main.yml new file mode 100644 index 000000000..6397436fc --- /dev/null +++ 
b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/tasks/data_access_management_and_snapshots.yml b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/tasks/data_access_management_and_snapshots.yml new file mode 100644 index 000000000..2cce408d1 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/tasks/data_access_management_and_snapshots.yml @@ -0,0 +1,281 @@ +--- +- debug: + msg: Start testing ntnx_ndb_time_machine_clusters + +- name: create data access instance with cluster name and sla name + ntnx_ndb_time_machine_clusters: + time_machine_uuid: "{{time_machine.uuid}}" + cluster: + name: "{{cluster.cluster2.name}}" + sla: + name: "{{sla.name}}" + register: out + + +- name: check listing status + assert: + that: + - out.response is defined + - out.time_machine_uuid is defined + - out.changed == true + - out.cluster_uuid is defined + - out.failed == false + fail_msg: "fail: Unable to create data access instance with cluster name and sla name" + success_msg: "pass: create data access instance with cluster name and sla name finished successfully" +####################### +- name: update data access instance with new sla name + ntnx_ndb_time_machine_clusters: + time_machine_uuid: "{{time_machine.uuid}}" + cluster: + name: "{{cluster.cluster2.name}}" + sla: + name: "{{sla2.name}}" + register: result + + + +- name: check listing status + assert: + that: + - result.response is defined + - result.time_machine_uuid is defined + - result.changed == true + - result.cluster_uuid is defined + - result.failed == false + - result.response.slaId != out.response.slaId + fail_msg: "fail: Unable to update data access instance with new sla name" + success_msg: "pass: update data access instance with new sla name finished successfully" + + +- name: idempotency checks + ntnx_ndb_time_machine_clusters: + time_machine_uuid: "{{time_machine.uuid}}" + cluster: + name: "{{cluster.cluster2.name}}" + sla: + name: "{{sla2.name}}" + register: result + +- name: check idempotency status + assert: + that: + - result.changed == false + - result.failed == false + - "'Nothing to change' in result.msg" + fail_msg: "clusters in time machine got updated" + success_msg: "update of clusters in time machine skipped due to no state changes" + + +############################################## multicluster snapshots and replication tests ######################### + +# cluster1: primary cluster +# cluster2: secondary cluster + +- name: create snapshot on cluster2 + ntnx_ndb_database_snapshots: + name: "ansible-created-snapshot-on-{{cluster.cluster2.name}}" + time_machine_uuid: "{{time_machine.uuid}}" + clusters: + - name: "{{cluster.cluster2.name}}" + register: result + +- name: check snapshot status + assert: + that: + - result.response is defined + - result.snapshot_uuid is defined + - result.changed == true + - result.response.name == "ansible-created-snapshot-on-{{cluster.cluster2.name}}" + - result.failed == false + - result.response.status == "PENDING" + + fail_msg: "fail: Unable to create snapshot for secondary cluster" + success_msg: "pass: snapshot created successfully for secondary cluster" + + +- name: wait for 2 minutes for replication to finish from source cluster to cluster2 + ansible.builtin.pause: + minutes: 2 + +- name: check that snapshot is active post replication + ntnx_ndb_snapshots_info: + uuid: "{{result.snapshot_uuid}}" + register: result + +- name: check snapshot status + assert: + that: + - result.response.name == "ansible-created-snapshot-on-{{cluster.cluster2.name}}" + - result.response.status == "ACTIVE" + fail_msg: "fail: Unable to check snapshot status post internal replication" + success_msg: "pass: snapshot replicated successfully on secondary cluster" + + + +- name: create a snapshot on cluster1 + ntnx_ndb_database_snapshots: + name: "ansible-created-snapshot-on-{{cluster.cluster1.name}}" + time_machine_uuid: "{{time_machine.uuid}}" + clusters: + - uuid: "{{cluster.cluster1.uuid}}" + register: result + +- name: check snapshot status on cluster1 + assert: + that: + - result.response is defined + - result.snapshot_uuid is defined + - result.changed == true + - result.response.name == "ansible-created-snapshot-on-{{cluster.cluster1.name}}" + - result.failed == false + - result.response.nxClusterId == cluster.cluster1.uuid + fail_msg: "fail: Unable to create snapshot on primary cluster" + success_msg: "pass: snapshot created successfully on primary cluster" + +- name: setting snapshot uuid for replication + set_fact: + snapshot_uuid: "{{result.snapshot_uuid}}" + +- name: create spec for replicating snapshot from cluster1 on cluster2 + check_mode: yes + ntnx_ndb_replicate_database_snapshots: + snapshot_uuid: "{{snapshot_uuid}}" + clusters: + - name: "{{cluster.cluster2.name}}" + expiry_days: 20 + register: result + +- set_fact: + expected_result: { + "changed": false, + "error": null, + "failed": false, + "response": { + "lcmConfig": { + "snapshotLCMConfig": { + "expiryDetails": { + "expireInDays": 20 + } + } + }, + "nxClusterIds": [ + "{{cluster.cluster2.uuid}}" + ] + }, + "snapshot_uuid": "{{snapshot_uuid}}" + } + + + +- name: check snapshot replication spec + assert: + that: + - result == expected_result + fail_msg: "fail: Unable to create snapshot replication spec" + success_msg: "pass: snapshot replication spec created successfully" + + +- name: replicate snapshot on cluster2 + ntnx_ndb_replicate_database_snapshots: + snapshot_uuid: "{{snapshot_uuid}}" + clusters: + - name: "{{cluster.cluster2.name}}" + expiry_days: 20 + register: result + + + +- name: verify status of snapshot replication + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + fail_msg: "snapshot replication failed" + success_msg: "snapshot replicated successfully" + +####################### + +- name: remove cluster from time machine + ntnx_ndb_time_machine_clusters: + state: absent + time_machine_uuid: "{{time_machine.uuid}}" + cluster: + uuid: "{{cluster.cluster2.uuid}}" + register: result + +- name: check delete status + assert: + that: + - result.response is defined + - result.time_machine_uuid is defined + - result.changed == true + - result.failed == false + fail_msg: "fail: Unable to remove cluster from time machine" + success_msg: "pass: cluster from time machine removed successfully" + +- name: wait for 5 minutes for internal cleanup to finish + ansible.builtin.pause: + minutes: 5 + +##################### +- name: create data access instance with cluster uuid and sla uuid + ntnx_ndb_time_machine_clusters: + time_machine_uuid: "{{time_machine.uuid}}" + cluster: + uuid: "{{cluster.cluster2.uuid}}" + sla: + uuid: "{{sla.uuid}}" + register: out + +- name: check listing status + assert: + that: + - out.response is defined + - out.time_machine_uuid is defined + - out.changed == true + - out.cluster_uuid is defined 
+ - out.failed == false + fail_msg: "fail: Unable to create data access instance with cluster uuid and sla uuid" + success_msg: "pass: create data access instance with cluster uuid and sla uuid finished successfully" +####################### +- name: update data access instance with sla uuid + ntnx_ndb_time_machine_clusters: + time_machine_uuid: "{{time_machine.uuid}}" + cluster: + uuid: "{{cluster.cluster2.uuid}}" + sla: + uuid: "{{sla2.uuid}}" + register: result + +- name: check listing status + assert: + that: + - result.response is defined + - result.time_machine_uuid is defined + - result.changed == true + - result.cluster_uuid is defined + - result.response.slaId != out.response.slaId + - result.failed == false + fail_msg: "fail: Unable to update data access instance with sla uuid" + success_msg: "pass: update data access instance with sla uuid finished successfully" + +- name: remove cluster from time machine + ntnx_ndb_time_machine_clusters: + state: absent + time_machine_uuid: "{{result.time_machine_uuid}}" + cluster: + uuid: "{{cluster.cluster2.uuid}}" + register: result + +- name: check delete status + assert: + that: + - result.response is defined + - result.time_machine_uuid is defined + - result.changed == true + - result.failed == false + fail_msg: "fail: Unable to remove cluster from time machine" + success_msg: "pass: cluster from time machine removed successfully" + +####################### \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_databases_and_info/tasks/main.yml b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/tasks/main.yml similarity index 76% rename from tests/integration/targets/ntnx_ndb_databases_and_info/tasks/main.yml rename to tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/tasks/main.yml index 87263bc16..df1211a7a 100644 --- a/tests/integration/targets/ntnx_ndb_databases_and_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/tasks/main.yml @@ -6,4 +6,4 @@ nutanix_password: "{{ndb_password}}" validate_certs: false block: - - import_tasks: "crud.yml" + - import_tasks: "data_access_management_and_snapshots.yml" diff --git a/tests/integration/targets/ntnx_ndb_vlans/aliases b/tests/integration/targets/ntnx_ndb_vlans/aliases new file mode 100644 index 000000000..139597f9c --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_vlans/aliases @@ -0,0 +1,2 @@ + + diff --git a/tests/integration/targets/ntnx_ndb_vlans/meta/main.yml b/tests/integration/targets/ntnx_ndb_vlans/meta/main.yml new file mode 100644 index 000000000..6397436fc --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_vlans/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml b/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml new file mode 100644 index 000000000..2525a8224 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml @@ -0,0 +1,434 @@ +--- +- debug: + msg: Start testing ntnx_ndb_vlans + +- name: create Dhcp ndb vlan + ntnx_ndb_vlans: + name: "{{ndb_vlan.name}}" + vlan_type: DHCP + cluster: + uuid: "{{cluster.cluster2.uuid}}" + register: result + ignore_errors: True + no_log: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.changed == true + - result.vlan_uuid is defined + - result.failed == false + - result.response.name == "{{ndb_vlan.name}}" + - result.response.type == "DHCP" + - 
result.response.managed == false + - result.response.clusterId == "{{cluster.cluster2.uuid}}" + fail_msg: "fail: Unable to create Dhcp ndb vlan" + success_msg: "pass: create Dhcp ndb vlan finished successfully" + +################################################################ + + +- name: update ndb vlan type for static + ntnx_ndb_vlans: + vlan_uuid: "{{result.vlan_uuid}}" + vlan_type: Static + gateway: "{{ndb_vlan.gateway}}" + subnet_mask: "{{ndb_vlan.subnet_mask}}" + ip_pools: + - + start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" + end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" + - + start_ip: "{{ndb_vlan.ip_pools.1.start_ip}}" + end_ip: "{{ndb_vlan.ip_pools.1.end_ip}}" + primary_dns: "{{ndb_vlan.primary_dns}}" + secondary_dns: "{{ndb_vlan.secondary_dns}}" + dns_domain: "{{ndb_vlan.dns_domain}}" + register: result + ignore_errors: True + no_log: true + + +- name: check listing status + assert: + that: + - result.response is defined + - result.vlan_uuid is defined + - result.changed == true + - result.failed == false + - result.response.name == "{{ndb_vlan.name}}" + - result.response.type == "Static" + - result.response.managed == false + - result.response.propertiesMap.VLAN_DNS_DOMAIN == "{{ndb_vlan.dns_domain}}" + - result.response.propertiesMap.VLAN_GATEWAY == "{{ndb_vlan.gateway}}" + - result.response.propertiesMap.VLAN_PRIMARY_DNS == "{{ndb_vlan.primary_dns}}" + - result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ndb_vlan.secondary_dns}}" + - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ndb_vlan.subnet_mask}}" + - result.response.ipPools[0].endIP == "{{ndb_vlan.ip_pools.0.end_ip}}" + - result.response.ipPools[0].startIP == "{{ndb_vlan.ip_pools.0.start_ip}}" + - result.response.ipPools[1].endIP == "{{ndb_vlan.ip_pools.1.end_ip}}" + - result.response.ipPools[1].startIP == "{{ndb_vlan.ip_pools.1.start_ip}}" + fail_msg: "fail: unable to update ndb vlan type for static" + success_msg: "pass: update ndb vlan type for static finished successfully" + +- set_fact: + todelete: [] + +- set_fact: + todelete: "{{ todelete + [ result.vlan_uuid ] }}" +################################################################ +- name: List all NDB vlans + ntnx_ndb_vlans_info: + register: vlans + no_log: true + ignore_errors: True + +- name: check listing status + assert: + that: + - vlans.response is defined + - vlans.failed == false + - vlans.changed == false + - vlans.response | length > 0 + fail_msg: "Unable to list all NDB vlans" + success_msg: "NDB vlans listed successfully" + +################################################################ + +- name: get NDB vlans using its name + ntnx_ndb_vlans_info: + name: "{{vlans.response[0].name}}" + register: result + no_log: true + ignore_errors: True + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.id == "{{vlans.response[0].id}}" + fail_msg: "Unable to get NDB vlans using its name" + success_msg: "get NDB vlans using its name successfully" + +################################################################ + +- name: List vlans using id + ntnx_ndb_vlans_info: + uuid: "{{vlans.response[0].id}}" + register: result + no_log: true + ignore_errors: True + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{vlans.response[0].name}}" + fail_msg: "Unable to get NDB vlans using its id" + success_msg: "get NDB vlans using its id successfully"
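+ +# Illustrative sketch (kept commented out, not executed): the "vlans" listing registered above can also be narrowed client-side with standard Jinja2 filters instead of a second info call. This assumes each entry carries the name, id and type fields asserted above; "DHCP" is only an example filter value: +# +# - name: build a name -> uuid map of DHCP vlans from the listing +# set_fact: +# dhcp_vlan_ids: "{{ vlans.response | selectattr('type', 'equalto', 'DHCP') | items2dict(key_name='name', value_name='id') }}" +# +# - debug: +# var: dhcp_vlan_ids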
+ +################################################################ + +- name: get NDB vlans with incorrect name + ntnx_ndb_vlans_info: + name: "abcd" + register: result + no_log: true + ignore_errors: True + +- name: check listing status + assert: + that: + - result.error is defined + - result.failed == true + - result.changed == false + fail_msg: "Fail: module didn't error out correctly when incorrect name is given" + success_msg: "Pass: module errored out correctly when incorrect name is given" +################################################################ + +- name: Delete created vlans + ntnx_ndb_vlans: + state: absent + vlan_uuid: "{{ todelete[0]}}" + register: result + no_log: true + ignore_errors: True + +- set_fact: + todelete: [] + +################################################################ + +- name: create static ndb vlan + ntnx_ndb_vlans: + name: "{{ndb_vlan.name}}" + vlan_type: Static + cluster: + uuid: "{{cluster.cluster2.uuid}}" + gateway: "{{ndb_vlan.gateway}}" + subnet_mask: "{{ndb_vlan.subnet_mask}}" + ip_pools: + - + start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" + end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" + - + start_ip: "{{ndb_vlan.ip_pools.1.start_ip}}" + end_ip: "{{ndb_vlan.ip_pools.1.end_ip}}" + primary_dns: "{{ndb_vlan.primary_dns}}" + secondary_dns: "{{ndb_vlan.secondary_dns}}" + dns_domain: "{{ndb_vlan.dns_domain}}" + register: result + ignore_errors: True + no_log: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.vlan_uuid is defined + - result.changed == true + - result.failed == false + - result.response.name == "{{ndb_vlan.name}}" + - result.response.type == "Static" + - result.response.managed == false + - result.response.propertiesMap.VLAN_DNS_DOMAIN == "{{ndb_vlan.dns_domain}}" + - result.response.propertiesMap.VLAN_GATEWAY == "{{ndb_vlan.gateway}}" + - result.response.propertiesMap.VLAN_PRIMARY_DNS == "{{ndb_vlan.primary_dns}}" + - result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ndb_vlan.secondary_dns}}" + - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ndb_vlan.subnet_mask}}" + - result.response.ipPools[0].endIP == "{{ndb_vlan.ip_pools.0.end_ip}}" + - result.response.ipPools[0].startIP == "{{ndb_vlan.ip_pools.0.start_ip}}" + - result.response.ipPools[1].endIP == "{{ndb_vlan.ip_pools.1.end_ip}}" + - result.response.ipPools[1].startIP == "{{ndb_vlan.ip_pools.1.start_ip}}" + fail_msg: "fail: unable to create static ndb vlan" + success_msg: "pass: create static ndb vlan finished successfully" +- set_fact: + todelete: "{{ todelete + [ result.vlan_uuid ] }}" + +################################################################ + +- name: update ndb vlan by removing ip pool + ntnx_ndb_vlans: + vlan_uuid: "{{result.vlan_uuid}}" + remove_ip_pools: + - "{{result.response.ipPools[0].id}}" + - "{{result.response.ipPools[1].id}}" + register: result + ignore_errors: True + no_log: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.vlan_uuid is defined + - result.changed == true + - result.failed == false + - result.response.name == "{{ndb_vlan.name}}" + - result.response.type == "Static" + - result.response.managed == false + - result.response.propertiesMap.VLAN_DNS_DOMAIN == "{{ndb_vlan.dns_domain}}" + - result.response.propertiesMap.VLAN_GATEWAY == "{{ndb_vlan.gateway}}" + - result.response.propertiesMap.VLAN_PRIMARY_DNS == "{{ndb_vlan.primary_dns}}" + - result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ndb_vlan.secondary_dns}}" + - 
result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ndb_vlan.subnet_mask}}" + - result.response.ipPools == [] + fail_msg: "fail: unable to update ndb vlan by removing ip pool" + success_msg: "pass: update ndb vlan by removing ip pool finished successfully" + +################################################################ + +- name: update ndb vlan by adding a pool + ntnx_ndb_vlans: + vlan_uuid: "{{result.vlan_uuid}}" + ip_pools: + - + start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" + end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" + register: result + ignore_errors: True + no_log: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.name == "{{ndb_vlan.name}}" + - result.response.ipPools is defined + - result.response.name == "{{ndb_vlan.name}}" + - result.response.type == "Static" + - result.response.managed == false + - result.response.propertiesMap.VLAN_DNS_DOMAIN == "{{ndb_vlan.dns_domain}}" + - result.response.propertiesMap.VLAN_GATEWAY == "{{ndb_vlan.gateway}}" + - result.response.propertiesMap.VLAN_PRIMARY_DNS == "{{ndb_vlan.primary_dns}}" + - result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ndb_vlan.secondary_dns}}" + - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ndb_vlan.subnet_mask}}" + - result.response.ipPools[0].endIP == "{{ndb_vlan.ip_pools.0.end_ip}}" + - result.response.ipPools[0].startIP == "{{ndb_vlan.ip_pools.0.start_ip}}" + fail_msg: "fail: unable to update ndb vlan by adding a pool" + success_msg: "pass: update ndb vlan by adding a pool finished successfully" + +################################################################ + +- name: update ndb vlan configuration + ntnx_ndb_vlans: + vlan_uuid: "{{result.vlan_uuid}}" + gateway: "{{ndb_vlan.updated_gateway}}" + subnet_mask: "{{ndb_vlan.updated_subnet_mask}}" + primary_dns: "{{ndb_vlan.updated_primary_dns}}" + secondary_dns: "{{ndb_vlan.updated_secondary_dns}}" + dns_domain: "{{ndb_vlan.updated_dns_domain}}" + register: result + ignore_errors: True + no_log: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.name == "{{ndb_vlan.name}}" + - result.response.managed == false + - result.response.propertiesMap.VLAN_DNS_DOMAIN == "{{ndb_vlan.updated_dns_domain}}" + - result.response.propertiesMap.VLAN_GATEWAY == "{{ndb_vlan.updated_gateway}}" + - result.response.propertiesMap.VLAN_PRIMARY_DNS == "{{ndb_vlan.updated_primary_dns}}" + - result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ndb_vlan.updated_secondary_dns}}" + - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ndb_vlan.updated_subnet_mask}}" + fail_msg: "fail: unable to update ndb vlan configuration" + success_msg: "pass: update ndb vlan configuration finished successfully" + +################################################################ + +- name: update ndb vlan type + ntnx_ndb_vlans: + vlan_uuid: "{{result.vlan_uuid}}" + vlan_type: DHCP + register: result + ignore_errors: True + no_log: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + - result.response.name == "{{ndb_vlan.name}}" + - result.response.type == "DHCP" + - result.response.managed == false + - result.vlan_uuid is defined + - result.response.properties == [] + fail_msg: "fail: unable to update ndb vlan type" + success_msg: "pass: update ndb vlan type finished successfully" + 
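+ +# Illustrative sketch (kept commented out, not executed): the todelete pattern used in this file can also be wrapped in block/always so created vlans are cleaned up even when an assertion fails mid-run. This assumes the same todelete fact built above: +# +# - block: +# - import_tasks: "create_vlans.yml" +# always: +# - name: cleanup created vlans even on failure +# ntnx_ndb_vlans: +# state: absent +# vlan_uuid: "{{ item }}" +# loop: "{{ todelete | default([]) }}"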
+################################################################ + +# - name: create ndb_stretched vlan +# ntnx_ndb_stretched_vlans: +# name: "{{st_vlan.name}}" +# desc: "{{st_vlan.desc}}" +# vlans: +# - "{{st_vlan.vlans_subnet1[0]}}" +# - "{{st_vlan.vlans_subnet1[1]}}" +# register: result +# ignore_errors: true +# - debug: +# var: result +# - name: check listing status +# assert: +# that: +# - result.response is defined +# - result.changed == true +# - result.vlan_uuid is defined +# - result.failed == false +# - result.response.name == "{{st_vlan.name}}" +# - result.response.description =="{{st_vlan.desc}}" +# - result.response.vlanIds[0] == "{{st_vlan.vlans_subnet1[0]}}" +# - result.response.vlanIds[1] == "{{st_vlan.vlans_subnet1[1]}}" +# fail_msg: "fail: Unable to create ndb_stretched vlan" +# success_msg: "pass: create ndb_stretched vlan finished successfully" + +# - set_fact: +# todelete: "{{ todelete + [ result.vlan_uuid ] }}" + +# ################################################################ + + +# - name: update ndb_stretched vlan name, desc and vlans +# ntnx_ndb_stretched_vlans: +# name: newname +# desc: newdesc +# vlan_uuid: "{{result.vlan_uuid}}" +# vlans: +# - "{{st_vlan.vlans_subnet2[0]}}" +# - "{{st_vlan.vlans_subnet2[1]}}" +# register: result +# ignore_errors: true +# - debug: +# var: result +# - name: check listing status +# assert: +# that: +# - result.response is defined +# - result.changed == true +# - result.vlan_uuid is defined +# - result.failed == false +# - result.response.name == "newname" +# - result.response.description == "newdesc" +# - result.response.vlanIds[0] == "{{st_vlan.vlans_subnet2[1]}}" +# - result.response.vlanIds[1] == "{{st_vlan.vlans_subnet2[0]}}" +# fail_msg: "fail: Unable to update ndb_stretched vlan name, desc and vlans " +# success_msg: "pass: update ndb_stretched vlan name, desc and vlans finished successfully" + +# ################################################################ + +# - name: update ndb_stretched vlan subnet_mask, gateway +# ntnx_ndb_stretched_vlans: +# vlan_uuid: "{{result.vlan_uuid}}" +# gateway: "{{st_vlan.gateway}}" +# subnet_mask: "{{st_vlan.subnet_mask}}" +# register: result +# ignore_errors: true +# - debug: +# var: result +# - name: check listing status +# assert: +# that: +# - result.response is defined +# - result.changed == true +# - result.vlan_uuid is defined +# - result.failed == false +# - result.response.metadata.gateway == "{{st_vlan.gateway}}" +# - result.response.metadata.subnet_mask == "{{st_vlan.subnet_mask}}" +# fail_msg: "fail: Unable to update ndb_stretched subnet_mask, gateway " +# success_msg: "pass: update ndb_stretched subnet_mask, gateway finished successfully" + +################################################################ + +- name: Delete all created vlans + ntnx_ndb_vlans: + state: absent + vlan_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: True + no_log: true + +- name: check listing status + assert: + that: + - result.changed is defined + - result.changed == true + - result.msg == "All items completed" + fail_msg: "unable to delete all created vlans" + success_msg: "All vlans deleted successfully" +- set_fact: + todelete: [] \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_vlans/tasks/main.yml b/tests/integration/targets/ntnx_ndb_vlans/tasks/main.yml new file mode 100644 index 000000000..2ffefe7dc --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_vlans/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- module_defaults: + 
group/nutanix.ncp.ntnx: + nutanix_host: "{{ndb_ip}}" + nutanix_username: "{{ndb_username}}" + nutanix_password: "{{ndb_password}}" + validate_certs: false + block: + - import_tasks: "create_vlans.yml" + - import_tasks: "negativ_scenarios.yml" diff --git a/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml b/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml new file mode 100644 index 000000000..811233ada --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml @@ -0,0 +1,126 @@ +--- +- debug: + msg: Start negative scenarios ntnx_ndb_vlans + +- name: create Dhcp ndb vlan with static configuration + ntnx_ndb_vlans: + name: "{{ndb_vlan.name}}" + vlan_type: DHCP + gateway: "{{ndb_vlan.gateway}}" + subnet_mask: "{{ndb_vlan.subnet_mask}}" + ip_pools: + - + start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" + end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" + primary_dns: "{{ndb_vlan.primary_dns}}" + secondary_dns: "{{ndb_vlan.secondary_dns}}" + dns_domain: "{{ndb_vlan.dns_domain}}" + register: result + ignore_errors: True + no_log: true + +- name: check listing status + assert: + that: + - result.changed == false + - result.failed == true + - result.msg == "Failed generating create vlan spec" + fail_msg: "fail: create Dhcp ndb vlan with static configuration finished successfully" + success_msg: "pass: Returned error as expected" +# ############################### +- name: create static ndb vlan with missing configuration + ntnx_ndb_vlans: + name: "{{ndb_vlan.name}}" + vlan_type: Static + gateway: "{{ndb_vlan.gateway}}" + register: result + ignore_errors: True + no_log: true + +- name: check listing status + assert: + that: + - result.changed == false + - result.failed == true + - result.msg == "Failed generating create vlan spec" + fail_msg: "fail: create static ndb vlan with missing configuration finished successfully" + success_msg: "pass: Returned error as expected" + +########### +- name: create Dhcp ndb vlan + ntnx_ndb_vlans: + name: "{{ndb_vlan.name}}" + vlan_type: DHCP + cluster: + uuid: "{{cluster.cluster2.uuid}}" + register: result + ignore_errors: True + no_log: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.changed == true + - result.vlan_uuid is defined + - result.failed == false + - result.response.name == "{{ndb_vlan.name}}" + - result.response.type == "DHCP" + - result.response.managed == false + - result.response.clusterId == "{{cluster.cluster2.uuid}}" + fail_msg: "fail: Unable to create Dhcp ndb vlan" + success_msg: "pass: create Dhcp ndb vlan finished successfully" + +- set_fact: + todelete: "{{ todelete + [ result.vlan_uuid ] }}" +###################### +- name: update dhcp ndb vlan with static configuration + ntnx_ndb_vlans: + vlan_uuid: "{{result.vlan_uuid}}" + gateway: "{{ndb_vlan.gateway}}" + subnet_mask: "{{ndb_vlan.subnet_mask}}" + ip_pools: + - + start_ip: "{{ndb_vlan.ip_pools.0.start_ip}}" + end_ip: "{{ndb_vlan.ip_pools.0.end_ip}}" + - + start_ip: "{{ndb_vlan.ip_pools.1.start_ip}}" + end_ip: "{{ndb_vlan.ip_pools.1.end_ip}}" + primary_dns: "{{ndb_vlan.primary_dns}}" + secondary_dns: "{{ndb_vlan.secondary_dns}}" + dns_domain: "{{ndb_vlan.dns_domain}}" + register: result + ignore_errors: True + no_log: true + +- name: check listing status + assert: + that: + - result.changed == false + - result.failed == true + - result.msg == "Failed generating update vlan spec" + fail_msg: "fail: update dhcp ndb vlan with static configuration finished successfully" + success_msg: 
"pass: Returnerd error as expected" + +################################## + +- name: Delete all created vlan's + ntnx_ndb_vlans: + state: absent + vlan_uuid: "{{ item }}" + register: result + loop: "{{ todelete }}" + ignore_errors: True + no_log: true + +- name: check listing status + assert: + that: + - result.changed is defined + - result.changed == true + - result.msg == "All items completed" + fail_msg: "unable to delete all created vlan's" + success_msg: "All vlan'sdeleted succesfully" + +- set_fact: + todelete: [] \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ova/aliases b/tests/integration/targets/ntnx_ova/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_ova/aliases +++ b/tests/integration/targets/ntnx_ova/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_permissions_info/aliases b/tests/integration/targets/ntnx_permissions_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_permissions_info/aliases +++ b/tests/integration/targets/ntnx_permissions_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_projects/aliases b/tests/integration/targets/ntnx_projects/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_projects/aliases +++ b/tests/integration/targets/ntnx_projects/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_projects_info/aliases b/tests/integration/targets/ntnx_projects_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_projects_info/aliases +++ b/tests/integration/targets/ntnx_projects_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_roles/aliases b/tests/integration/targets/ntnx_roles/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_roles/aliases +++ b/tests/integration/targets/ntnx_roles/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_roles_info/aliases b/tests/integration/targets/ntnx_roles_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_roles_info/aliases +++ b/tests/integration/targets/ntnx_roles_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_security_rules/aliases b/tests/integration/targets/ntnx_security_rules/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_security_rules/aliases +++ b/tests/integration/targets/ntnx_security_rules/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_security_rules_info/aliases b/tests/integration/targets/ntnx_security_rules_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_security_rules_info/aliases +++ b/tests/integration/targets/ntnx_security_rules_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_service_groups/aliases b/tests/integration/targets/ntnx_service_groups/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_service_groups/aliases +++ b/tests/integration/targets/ntnx_service_groups/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_service_groups_info/aliases b/tests/integration/targets/ntnx_service_groups_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_service_groups_info/aliases +++ b/tests/integration/targets/ntnx_service_groups_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_static_routes/aliases 
b/tests/integration/targets/ntnx_static_routes/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_static_routes/aliases +++ b/tests/integration/targets/ntnx_static_routes/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_static_routes_info/aliases b/tests/integration/targets/ntnx_static_routes_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_static_routes_info/aliases +++ b/tests/integration/targets/ntnx_static_routes_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_user_groups/aliases b/tests/integration/targets/ntnx_user_groups/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_user_groups/aliases +++ b/tests/integration/targets/ntnx_user_groups/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_user_groups_info/aliases b/tests/integration/targets/ntnx_user_groups_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_user_groups_info/aliases +++ b/tests/integration/targets/ntnx_user_groups_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_users/aliases b/tests/integration/targets/ntnx_users/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_users/aliases +++ b/tests/integration/targets/ntnx_users/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_users_info/aliases b/tests/integration/targets/ntnx_users_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_users_info/aliases +++ b/tests/integration/targets/ntnx_users_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/ntnx_vms_clone/aliases b/tests/integration/targets/ntnx_vms_clone/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/ntnx_vms_clone/aliases +++ b/tests/integration/targets/ntnx_vms_clone/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/nutanix_floating_ips/aliases b/tests/integration/targets/nutanix_floating_ips/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/nutanix_floating_ips/aliases +++ b/tests/integration/targets/nutanix_floating_ips/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/nutanix_floating_ips_info/aliases b/tests/integration/targets/nutanix_floating_ips_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/nutanix_floating_ips_info/aliases +++ b/tests/integration/targets/nutanix_floating_ips_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/nutanix_pbrs/aliases b/tests/integration/targets/nutanix_pbrs/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/nutanix_pbrs/aliases +++ b/tests/integration/targets/nutanix_pbrs/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/nutanix_pbrs_info/aliases b/tests/integration/targets/nutanix_pbrs_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/nutanix_pbrs_info/aliases +++ b/tests/integration/targets/nutanix_pbrs_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/nutanix_subnets/aliases b/tests/integration/targets/nutanix_subnets/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/nutanix_subnets/aliases +++ b/tests/integration/targets/nutanix_subnets/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/nutanix_subnets_info/aliases 
b/tests/integration/targets/nutanix_subnets_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/nutanix_subnets_info/aliases +++ b/tests/integration/targets/nutanix_subnets_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/nutanix_vms/aliases b/tests/integration/targets/nutanix_vms/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/nutanix_vms/aliases +++ b/tests/integration/targets/nutanix_vms/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/nutanix_vms_info/aliases b/tests/integration/targets/nutanix_vms_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/nutanix_vms_info/aliases +++ b/tests/integration/targets/nutanix_vms_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/nutanix_vpcs/aliases b/tests/integration/targets/nutanix_vpcs/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/nutanix_vpcs/aliases +++ b/tests/integration/targets/nutanix_vpcs/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/nutanix_vpcs_info/aliases b/tests/integration/targets/nutanix_vpcs_info/aliases index e69de29bb..7a68b11da 100644 --- a/tests/integration/targets/nutanix_vpcs_info/aliases +++ b/tests/integration/targets/nutanix_vpcs_info/aliases @@ -0,0 +1 @@ +disabled diff --git a/tests/integration/targets/prepare_env/tasks/cleanup.yml b/tests/integration/targets/prepare_env/tasks/cleanup.yml index 481562166..94cb70cf4 100644 --- a/tests/integration/targets/prepare_env/tasks/cleanup.yml +++ b/tests/integration/targets/prepare_env/tasks/cleanup.yml @@ -7,53 +7,53 @@ tasks: - name: include var file include_vars: ../vars/main.yml - - name: Delete VM - ntnx_vms: - vm_uuid: '{{vm.uuid }}' - state: absent - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - # - name: Delete DR VM - # ntnx_vms: - # vm_uuid: '{{dr_vm.uuid }}' - # state: absent - # nutanix_host: "{{ ip }}" - # nutanix_username: "{{ username }}" - # nutanix_password: "{{ password }}" - # validate_certs: False - - name: Delete overlay - ntnx_subnets: - state: absent - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - subnet_uuid: "{{item }}" - loop: - - "{{overlay.uuid}}" - - name: Delete vpc - ntnx_vpcs: - state: absent - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - vpc_uuid: "{{ vpc.uuid }}" - - name: Delete subnets - ntnx_subnets: - state: absent - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: false - subnet_uuid: "{{item }}" - loop: - - "{{external_nat_subnet.uuid}}" - - "{{static.uuid}}" +# - name: Delete VM +# ntnx_vms: +# vm_uuid: '{{vm.uuid }}' +# state: absent +# nutanix_host: "{{ ip }}" +# nutanix_username: "{{ username }}" +# nutanix_password: "{{ password }}" +# validate_certs: False +# # - name: Delete DR VM +# # ntnx_vms: +# # vm_uuid: '{{dr_vm.uuid }}' +# # state: absent +# # nutanix_host: "{{ ip }}" +# # nutanix_username: "{{ username }}" +# # nutanix_password: "{{ password }}" +# # validate_certs: False +# - name: Delete overlay +# ntnx_subnets: +# state: absent +# nutanix_host: "{{ ip }}" +# nutanix_username: "{{ username }}" +# nutanix_password: "{{ password }}" +# validate_certs: false +# subnet_uuid: "{{item }}" +# loop: +# - "{{overlay.uuid}}" +# 
- name: Delete vpc +# ntnx_vpcs: +# state: absent +# nutanix_host: "{{ ip }}" +# nutanix_username: "{{ username }}" +# nutanix_password: "{{ password }}" +# validate_certs: False +# vpc_uuid: "{{ vpc.uuid }}" +# - name: Delete subnets +# ntnx_subnets: +# state: absent +# nutanix_host: "{{ ip }}" +# nutanix_username: "{{ username }}" +# nutanix_password: "{{ password }}" +# validate_certs: false +# subnet_uuid: "{{item }}" +# loop: +# - "{{external_nat_subnet.uuid}}" +# - "{{static.uuid}}" - - name: Delete downloaded disk file - file: - path: "{{ disk_image.dest }}" - state: absent \ No newline at end of file +# - name: Delete downloaded disk file +# file: +# path: "{{ disk_image.dest }}" +# state: absent \ No newline at end of file diff --git a/tests/integration/targets/prepare_env/tasks/prepare_env.yml b/tests/integration/targets/prepare_env/tasks/prepare_env.yml index 87a5a2d10..3c6c59d97 100644 --- a/tests/integration/targets/prepare_env/tasks/prepare_env.yml +++ b/tests/integration/targets/prepare_env/tasks/prepare_env.yml @@ -8,204 +8,204 @@ tasks: - name: include var file include_vars: ../vars/main.yml - - set_fact: - ip: "{{lookup('env', 'NUTANIX_HOST') }}" - username: "{{lookup('env', 'NUTANIX_USERNAME') }}" - password: "{{lookup('env', 'NUTANIX_PASSWORD') }}" - recovery_site_ip: "{{lookup('env', 'NUTANIX_DR_SITE')}}" - - name: Insert credentials block to vars - blockinfile: - path: ../vars/main.yml - marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 0" - block: | - ip: "{{lookup('env', 'NUTANIX_HOST') }}" - username: "{{lookup('env', 'NUTANIX_USERNAME') }}" - password: "{{lookup('env', 'NUTANIX_PASSWORD') }}" - recovery_site_ip: "{{lookup('env', 'NUTANIX_DR_SITE') }}" - - name: create external subnet with NAT - ntnx_subnets: - state: present - nutanix_host: "{{ ip }}" - validate_certs: false - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - name: "{{external_nat_subnets.name}}" - external_subnet: - vlan_id: "{{external_nat_subnets.vlan_id}}" - enable_nat: True - cluster: - name: "{{ cluster.name }}" - ipam: - network_ip: "{{ external_nat_subnets.network_ip }}" - network_prefix: "{{ external_nat_subnets.network_prefix }}" - gateway_ip: "{{ external_nat_subnets.gateway_ip_address }}" - ip_pools: - - start_ip: "{{ external_nat_subnets.dhcp.start_address }}" - end_ip: "{{ external_nat_subnets.dhcp.end_address }}" - register: result - - name: Insert external subnet configuration block to vars - blockinfile: - path: ../vars/main.yml - marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 1" - block: | - external_nat_subnet: - name: "{{external_nat_subnets.name}}" - uuid: "{{result.subnet_uuid}}" +# - set_fact: +# ip: "{{lookup('env', 'NUTANIX_HOST') }}" +# username: "{{lookup('env', 'NUTANIX_USERNAME') }}" +# password: "{{lookup('env', 'NUTANIX_PASSWORD') }}" +# recovery_site_ip: "{{lookup('env', 'NUTANIX_DR_SITE')}}" +# - name: Insert credentials block to vars +# blockinfile: +# path: ../vars/main.yml +# marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 0" +# block: | +# ip: "{{lookup('env', 'NUTANIX_HOST') }}" +# username: "{{lookup('env', 'NUTANIX_USERNAME') }}" +# password: "{{lookup('env', 'NUTANIX_PASSWORD') }}" +# recovery_site_ip: "{{lookup('env', 'NUTANIX_DR_SITE') }}" +# - name: create external subnet with NAT +# ntnx_subnets: +# state: present +# nutanix_host: "{{ ip }}" +# validate_certs: false +# nutanix_username: "{{ username }}" +# nutanix_password: "{{ password }}" +# name: "{{external_nat_subnets.name}}" +# external_subnet: +# vlan_id: 
"{{external_nat_subnets.vlan_id}}" +# enable_nat: True +# cluster: +# name: "{{ cluster.name }}" +# ipam: +# network_ip: "{{ external_nat_subnets.network_ip }}" +# network_prefix: "{{ external_nat_subnets.network_prefix }}" +# gateway_ip: "{{ external_nat_subnets.gateway_ip_address }}" +# ip_pools: +# - start_ip: "{{ external_nat_subnets.dhcp.start_address }}" +# end_ip: "{{ external_nat_subnets.dhcp.end_address }}" +# register: result +# - name: Insert external subnet configuration block to vars +# blockinfile: +# path: ../vars/main.yml +# marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 1" +# block: | +# external_nat_subnet: +# name: "{{external_nat_subnets.name}}" +# uuid: "{{result.subnet_uuid}}" - - name: Create min VPC with external_subnet uuid - ntnx_vpcs: - validate_certs: False - state: present - wait: true - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - name: "{{vpc_name}}" - external_subnets: - - subnet_uuid: "{{ result.subnet_uuid }}" - register: result +# - name: Create min VPC with external_subnet uuid +# ntnx_vpcs: +# validate_certs: False +# state: present +# wait: true +# nutanix_host: "{{ ip }}" +# nutanix_username: "{{ username }}" +# nutanix_password: "{{ password }}" +# name: "{{vpc_name}}" +# external_subnets: +# - subnet_uuid: "{{ result.subnet_uuid }}" +# register: result - - name: Insert VPC configuration block to vars - blockinfile: - path: ../vars/main.yml - marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 2" - block: | - vpc: - name: "{{vpc_name}}" - uuid: "{{result.vpc_uuid}}" +# - name: Insert VPC configuration block to vars +# blockinfile: +# path: ../vars/main.yml +# marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 2" +# block: | +# vpc: +# name: "{{vpc_name}}" +# uuid: "{{result.vpc_uuid}}" - - name: create vlan subnet with IPAM - ntnx_subnets: - state: present - nutanix_host: "{{ ip }}" - wait: true - validate_certs: false - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - name: "{{static_subnet_name}}" - vlan_subnet: - vlan_id: 373 - virtual_switch: - name: vs0 - cluster: - name: "{{ cluster.name }}" - ipam: - network_ip: 10.30.30.0 - network_prefix: 24 - gateway_ip: 10.30.30.254 - ip_pools: - - start_ip: 10.30.30.10 - end_ip: 10.30.30.90 - register: result +# - name: create vlan subnet with IPAM +# ntnx_subnets: +# state: present +# nutanix_host: "{{ ip }}" +# wait: true +# validate_certs: false +# nutanix_username: "{{ username }}" +# nutanix_password: "{{ password }}" +# name: "{{static_subnet_name}}" +# vlan_subnet: +# vlan_id: 373 +# virtual_switch: +# name: vs0 +# cluster: +# name: "{{ cluster.name }}" +# ipam: +# network_ip: 10.30.30.0 +# network_prefix: 24 +# gateway_ip: 10.30.30.254 +# ip_pools: +# - start_ip: 10.30.30.10 +# end_ip: 10.30.30.90 +# register: result - - name: Insert vlan subnet configuration block to var file - blockinfile: - path: ../vars/main.yml - marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 3" - block: | - static: - name: "{{static_subnet_name}}" - uuid: "{{result.subnet_uuid}}" - network_ip: 10.30.30.0 - network_prefix: 24 - gateway_ip: 10.30.30.254 +# - name: Insert vlan subnet configuration block to var file +# blockinfile: +# path: ../vars/main.yml +# marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 3" +# block: | +# static: +# name: "{{static_subnet_name}}" +# uuid: "{{result.subnet_uuid}}" +# network_ip: 10.30.30.0 +# network_prefix: 24 +# gateway_ip: 10.30.30.254 - - name: include var file - include_vars: ../vars/main.yml - - name: 
create overlay Subnet with minimum requirements - ntnx_subnets: - state: present - nutanix_host: "{{ ip }}" - validate_certs: false - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - name: "{{overlay_subnet.name}}" - overlay_subnet: - vpc: - uuid: "{{ vpc.uuid }}" - ipam: - network_ip: "{{overlay_subnet.network_ip}}" - network_prefix: "{{overlay_subnet.network_prefix}}" - gateway_ip: "{{overlay_subnet.gateway_ip}}" - register: result +# - name: include var file +# include_vars: ../vars/main.yml +# - name: create overlay Subnet with minimum requirements +# ntnx_subnets: +# state: present +# nutanix_host: "{{ ip }}" +# validate_certs: false +# nutanix_username: "{{ username }}" +# nutanix_password: "{{ password }}" +# name: "{{overlay_subnet.name}}" +# overlay_subnet: +# vpc: +# uuid: "{{ vpc.uuid }}" +# ipam: +# network_ip: "{{overlay_subnet.network_ip}}" +# network_prefix: "{{overlay_subnet.network_prefix}}" +# gateway_ip: "{{overlay_subnet.gateway_ip}}" +# register: result - - name: Insert overlay subnet configuration block to var file - blockinfile: - path: ../vars/main.yml - marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 4" - block: | - overlay: - name: "{{overlay_subnet.name}}" - uuid: "{{result.subnet_uuid}}" +# - name: Insert overlay subnet configuration block to var file +# blockinfile: +# path: ../vars/main.yml +# marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 4" +# block: | +# overlay: +# name: "{{overlay_subnet.name}}" +# uuid: "{{result.subnet_uuid}}" - - name: create VM with overlay subnet - ntnx_vms: - state: present - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: False - name: "{{vm_name}}" - cluster: - uuid: "{{ cluster.uuid }}" - networks: - - is_connected: true - subnet: - name: "{{overlay_subnet.name}}" - private_ip: "{{overlay_subnet.private_ip}}" - register: result +# - name: create VM with overlay subnet +# ntnx_vms: +# state: present +# nutanix_host: "{{ ip }}" +# nutanix_username: "{{ username }}" +# nutanix_password: "{{ password }}" +# validate_certs: False +# name: "{{vm_name}}" +# cluster: +# uuid: "{{ cluster.uuid }}" +# networks: +# - is_connected: true +# subnet: +# name: "{{overlay_subnet.name}}" +# private_ip: "{{overlay_subnet.private_ip}}" +# register: result - - name: Insert vm configuration block to var file - blockinfile: - path: ../vars/main.yml - marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 5" - block: | - vm: - name: "{{vm_name}}" - uuid: "{{result.vm_uuid}}" +# - name: Insert vm configuration block to var file +# blockinfile: +# path: ../vars/main.yml +# marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 5" +# block: | +# vm: +# name: "{{vm_name}}" +# uuid: "{{result.vm_uuid}}" - # - name: create VM with static subnet for dr tests - # ntnx_vms: - # state: present - # nutanix_host: "{{ ip }}" - # nutanix_username: "{{ username }}" - # nutanix_password: "{{ password }}" - # validate_certs: False - # name: "{{dr_vm_name}}" - # categories: - # Environment: - # - Staging - # - Testing - # cluster: - # uuid: "{{ cluster.uuid }}" - # networks: - # - is_connected: true - # subnet: - # name: "{{static_subnet_name}}" - # vcpus: 1 - # cores_per_vcpu: 1 - # memory_gb: 1 - # register: result +# # - name: create VM with static subnet for dr tests +# # ntnx_vms: +# # state: present +# # nutanix_host: "{{ ip }}" +# # nutanix_username: "{{ username }}" +# # nutanix_password: "{{ password }}" +# # validate_certs: False +# # name: "{{dr_vm_name}}" +# # categories: 
+# # Environment: +# # - Staging +# # - Testing +# # cluster: +# # uuid: "{{ cluster.uuid }}" +# # networks: +# # - is_connected: true +# # subnet: +# # name: "{{static_subnet_name}}" +# # vcpus: 1 +# # cores_per_vcpu: 1 +# # memory_gb: 1 +# # register: result - # - name: Insert vm configuration block to var file - # blockinfile: - # path: ../vars/main.yml - # marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 6" - # block: | - # dr_vm: - # name: "{{dr_vm_name}}" - # uuid: "{{result.vm_uuid}}" +# # - name: Insert vm configuration block to var file +# # blockinfile: +# # path: ../vars/main.yml +# # marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 6" +# # block: | +# # dr_vm: +# # name: "{{dr_vm_name}}" +# # uuid: "{{result.vm_uuid}}" - - name: Downloading disk image for image related tests - get_url: - url: "{{ disk_image.url }}" - dest: "{{ disk_image.dest }}" +# - name: Downloading disk image for image related tests +# get_url: +# url: "{{ disk_image.url }}" +# dest: "{{ disk_image.dest }}" - # - name: create address group for network security policy related tests - # ntnx_address_groups: - # state: present - # name: dest - # desc: dest - # subnets: - # - network_ip: "10.1.1.0" - # network_prefix: 24 \ No newline at end of file +# # - name: create address group for network security policy related tests +# # ntnx_address_groups: +# # state: present +# # name: dest +# # desc: dest +# # subnets: +# # - network_ip: "10.1.1.0" +# # network_prefix: 24 \ No newline at end of file diff --git a/tests/integration/targets/prepare_foundation_env/tasks/cleanup.yml b/tests/integration/targets/prepare_foundation_env/tasks/cleanup.yml index 940ca10a6..bc4abe367 100644 --- a/tests/integration/targets/prepare_foundation_env/tasks/cleanup.yml +++ b/tests/integration/targets/prepare_foundation_env/tasks/cleanup.yml @@ -5,8 +5,8 @@ tasks: - name: include var file include_vars: ../vars/main.yml - - name: Delete files - file: - path: "{{ source }}" - state: absent - ignore_errors: true \ No newline at end of file + # - name: Delete files + # file: + # path: "{{ source }}" + # state: absent + # ignore_errors: true \ No newline at end of file diff --git a/tests/integration/targets/prepare_ndb_env/tasks/prepare_env.yml b/tests/integration/targets/prepare_ndb_env/tasks/prepare_env.yml new file mode 100644 index 000000000..70debb3c5 --- /dev/null +++ b/tests/integration/targets/prepare_ndb_env/tasks/prepare_env.yml @@ -0,0 +1,23 @@ +--- +- name: prepare the environment for ndb + hosts: localhost + gather_facts: false + collections: + - nutanix.ncp + + tasks: + - name: include var file + include_vars: ../vars/main.yml + - set_fact: + ndb_ip: "{{lookup('env', 'NDB_HOST') }}" + ndb_username: "{{lookup('env', 'NDB_USERNAME') }}" + ndb_password: "{{lookup('env', 'NDB_PASSWORD') }}" + - name: Insert credentials block to vars + blockinfile: + path: ../vars/main.yml + marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 0" + block: | + ndb_ip: "{{lookup('env', 'NDB_HOST') }}" + ndb_username: "{{lookup('env', 'NDB_USERNAME') }}" + ndb_password: "{{lookup('env', 'NDB_PASSWORD') }}" + \ No newline at end of file diff --git a/tests/integration/targets/prepare_ndb_env/tasks/tmp/.gitkeep b/tests/integration/targets/prepare_ndb_env/tasks/tmp/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/targets/prepare_ndb_env/vars/.gitkeep b/tests/integration/targets/prepare_ndb_env/vars/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git 
a/tests/integration/targets/prepare_ndb_env/vars/main.yml b/tests/integration/targets/prepare_ndb_env/vars/main.yml new file mode 100644 index 000000000..e69de29bb
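
Note on the variable-file bootstrapping used by both prepare_env.yml playbooks above: each credential or resource block is written through blockinfile with a numbered marker ("insertion 0" through "insertion 6"), so reruns rewrite each managed block in place rather than appending duplicates, and the distinct numbers keep the blocks from overwriting one another. A minimal sketch of the mechanism, assuming NDB_HOST is exported in the environment (the resolved value shown in the trailing comment is hypothetical):

    - name: Insert credentials block to vars
      blockinfile:
        path: ../vars/main.yml
        # {mark} expands to BEGIN / END, so a rerun replaces this exact
        # block instead of appending a second copy to the file.
        marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 0"
        block: |
          ndb_ip: "{{ lookup('env', 'NDB_HOST') }}"

    # After one run, ../vars/main.yml contains (value hypothetical):
    #   # BEGIN ANSIBLE MANAGED BLOCK insertion 0
    #   ndb_ip: "10.44.0.5"
    #   # END ANSIBLE MANAGED BLOCK insertion 0

Because module arguments are templated before execution, the env lookup is resolved at write time, so the var file ends up holding concrete values the later test targets can include.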
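
The new prepare_ndb_env target only records credentials; it runs no NDB calls itself. As a hedged sketch of how a follow-on integration test could consume the vars it writes, here is a connectivity smoke test using the existing ntnx_ndb_databases_info module. It assumes the NDB modules take the collection's usual connection parameters (nutanix_host, nutanix_username, nutanix_password, validate_certs) and that info modules return a response field; the play and task names are illustrative, not part of the change set:

    ---
    - name: verify NDB credentials recorded by prepare_env.yml
      hosts: localhost
      gather_facts: false
      collections:
        - nutanix.ncp

      tasks:
        - name: include the var file populated by prepare_ndb_env
          include_vars: ../vars/main.yml

        - name: list database instances as a connectivity smoke test
          ntnx_ndb_databases_info:
            nutanix_host: "{{ ndb_ip }}"
            nutanix_username: "{{ ndb_username }}"
            nutanix_password: "{{ ndb_password }}"
            validate_certs: false
          register: result

        - name: assert the NDB API answered
          assert:
            that:
              - result.response is defined

For the set_fact and blockinfile lookups in prepare_env.yml to resolve, NDB_HOST, NDB_USERNAME, and NDB_PASSWORD must be exported before the prep playbook runs, e.g. NDB_HOST=<ndb-server> NDB_USERNAME=<user> NDB_PASSWORD=<pass> ansible-playbook prepare_env.yml (values shown are placeholders).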