From 923e9c76915c55006882df6b0623ffb781ed854d Mon Sep 17 00:00:00 2001 From: Pradeepsingh Bhati Date: Tue, 11 Jul 2023 16:52:38 +0530 Subject: [PATCH] v1.9.0 release branch and qualifications (#379) * fix bug#349 (#357) * fix bug#349 * fix --------- Co-authored-by: alaa-bish * Fetch available IPs for network profiles in NDB (#346) * Add available IPs in network profiles response as per flag * doc fix * isort fix * sanity fix * Fix mismatch of virtual switch in subnet creation (#341) * Fix mismatch of virtual switch in subnet creation * fix isort issues * fix for etcd min disk size (#356) * fix for etcd min disk size * fix doc and tests --------- Co-authored-by: Gevorg-Khachatryaan Co-authored-by: alaa-bish * Support multi-tiers target in application network security rule (#342) * Change apptier to have multiple app tiers in app security policy * Doc changes * Doc changes * flake8 fixes * sanity fixes * Fix setting some configuration for cluster in foundation (#343) * Fix cluster fields configuration issues * lint fix * fix filters combination expression (#353) * Feat/karbon clusters update (#351) * create and delete node pools * separate node pools entity * update functionality for nodes count and labels * sanity fix * functionality to get subnet by name * fix defaults * fix arguments * fix for etcd min disk size * Add integration tests * fix bug#349 * sanity fix * Revert "fix for etcd min disk size" This reverts commit fb30a68105dd5173670f16a042dfa0bfb6be99fb. * Revert "fix bug#349" This reverts commit 9a30aa33eedf087bc9c63474f2a5f9927d57e975. * fixes * fixes * doc fix * fixes to wait tasks completion * fix tests * fixes for update labels failure * fix for black issue * add tests * fix --------- Co-authored-by: Gevorg-Khachatryaan Co-authored-by: alaa-bish * fix tests * NDB tags info module (#348) * NDB tags info module with docs and tests * Doc fix * tags info doc fix * Minor spelling changes * Add nutanix open source support info in read me * Enable karbon tests * Enable PC setup * Disable NDB tests * set validate_certs * Delete user group post projects test for cleanup. Skip IDP tests * fix tests * Minor test fix * Fix sanity * Add docs and release notes. 
Add sanity tests for NDB and Foundation and enable their tests * sanity fixes * Add docs for karbon new module and minor fixes * sanity fixes * fix sanity * Disable network segmentation in foundation sanity tests * Skip Liquid syntax checks for conflicting ansible playbook code * Update release dates --------- Co-authored-by: Gevorg Khachatryan Co-authored-by: alaa-bish --- CHANGELOG.md | 25 +- CHANGELOG.rst | 29 + README.md | 20 +- changelogs/changelog.yaml | 30 + examples/karbon/create_k8s_cluster.yml | 43 +- galaxy.yml | 2 +- meta/runtime.yml | 2 + plugins/module_utils/entity.py | 2 +- .../module_utils/foundation/image_nodes.py | 21 +- plugins/module_utils/karbon/clusters.py | 9 +- plugins/module_utils/karbon/node_pools.py | 176 ++++++ .../ndb/profiles/profile_types.py | 5 + plugins/module_utils/prism/groups.py | 12 +- plugins/module_utils/prism/security_rules.py | 4 +- plugins/module_utils/prism/subnets.py | 11 +- .../module_utils/prism/virtual_switches.py | 14 +- plugins/module_utils/utils.py | 19 + plugins/modules/ntnx_karbon_clusters.py | 2 +- .../ntnx_karbon_clusters_node_pools.py | 363 +++++++++++ plugins/modules/ntnx_ndb_profiles_info.py | 14 + plugins/modules/ntnx_ndb_tags_info.py | 192 ++++++ plugins/modules/ntnx_security_rules.py | 29 +- tests/integration/targets/ntnx_acps/aliases | 1 - .../targets/ntnx_acps/tasks/create_acps.yml | 2 +- .../targets/ntnx_acps/tasks/update_acps.yml | 6 +- .../targets/ntnx_acps_info/aliases | 2 +- .../targets/ntnx_address_groups/aliases | 2 +- .../targets/ntnx_address_groups_info/aliases | 2 +- .../targets/ntnx_categories/aliases | 2 +- .../ntnx_categories/tasks/all_operations.yml | 6 +- .../targets/ntnx_categories_info/aliases | 2 +- .../targets/ntnx_clusters_info/aliases | 2 +- .../tasks/get_node_info.yml | 2 +- .../targets/ntnx_foundation_sanity/aliases | 0 .../ntnx_foundation_sanity/meta/main.yml | 2 + .../tasks/image_nodes.yml | 172 ++++++ .../ntnx_foundation_sanity/tasks/main.yml | 6 + .../targets/ntnx_hosts_info/aliases | 2 +- .../aliases | 2 +- .../ntnx_image_placement_policy/aliases | 2 +- .../tasks/update.yml | 2 +- tests/integration/targets/ntnx_images/aliases | 2 +- .../targets/ntnx_images/tasks/update.yml | 2 +- .../targets/ntnx_images_info/aliases | 2 +- .../ntnx_karbon_clusters_and_info/aliases | 1 - .../{create_delete_and_info.yml => crud.yml} | 237 ++++++- .../tasks/main.yml | 4 +- ...v_scenarios.yml => negative_scenarios.yml} | 43 -- .../ntnx_karbon_registries/tasks/create.yml | 8 +- .../tasks/negativ_scenarios.yml | 2 +- .../ntnx_ndb_availability_databases/aliases | 2 +- .../tasks/tests.yml | 9 +- .../targets/ntnx_ndb_clusters/aliases | 1 + .../targets/ntnx_ndb_database_clones/aliases | 1 + .../ntnx_ndb_database_clones/tasks/clones.yml | 7 +- .../ntnx_ndb_databases_actions/aliases | 1 + .../tasks/all_actions.yml | 10 +- .../targets/ntnx_ndb_databases_sanity/aliases | 2 + .../ntnx_ndb_databases_sanity/meta/main.yml | 2 + .../ntnx_ndb_databases_sanity/tasks/main.yml | 10 + .../ntnx_ndb_databases_sanity/tasks/tests.yml | 578 ++++++++++++++++++ .../aliases | 1 + .../tasks/tests.yml | 8 +- .../aliases | 1 + .../tasks/tests.yml | 9 +- .../targets/ntnx_ndb_db_server_vms/aliases | 1 + .../ntnx_ndb_db_server_vms/tasks/crud.yml | 16 +- .../ntnx_ndb_maintenance_windows/aliases | 1 + .../targets/ntnx_ndb_profiles/aliases | 1 - .../ntnx_ndb_profiles/tasks/compute.yml | 10 +- .../ntnx_ndb_profiles/tasks/db_params.yml | 12 +- .../tasks/network_profile.yml | 156 ++--- .../targets/ntnx_ndb_profiles_info/aliases | 1 -
.../ntnx_ndb_profiles_info/tasks/info.yml | 38 ++ .../integration/targets/ntnx_ndb_slas/aliases | 1 + .../targets/ntnx_ndb_slas/tasks/CRUD.yml | 8 +- .../targets/ntnx_ndb_snapshots_info/aliases | 1 + .../ntnx_ndb_software_profiles/aliases | 1 + .../ntnx_ndb_software_profiles/tasks/crud.yml | 4 + .../targets/ntnx_ndb_tags/tasks/crud.yml | 280 ++++++++- .../ntnx_ndb_time_machines_info/aliases | 1 + .../aliases | 1 + .../targets/ntnx_ndb_vlans/aliases | 1 + .../ntnx_ndb_vlans/tasks/create_vlans.yml | 14 +- .../tasks/negativ_scenarios.yml | 8 +- tests/integration/targets/ntnx_ova/aliases | 2 +- .../targets/ntnx_permissions_info/aliases | 2 +- .../integration/targets/ntnx_projects/aliases | 2 +- .../targets/ntnx_projects/tasks/main.yml | 2 +- ...ts.yml => projects_with_role_mappings.yml} | 29 +- .../targets/ntnx_projects_info/aliases | 2 +- .../tasks/protection_rules.yml | 2 +- tests/integration/targets/ntnx_roles/aliases | 2 +- .../targets/ntnx_roles_info/aliases | 2 +- .../targets/ntnx_security_rules/aliases | 2 +- .../ntnx_security_rules/tasks/app_rule.yml | 15 +- .../tasks/quarantine_rule.yml | 2 +- .../targets/ntnx_security_rules_info/aliases | 2 +- .../tasks/get_security_rules.yml | 2 +- .../targets/ntnx_service_groups/aliases | 2 +- .../ntnx_service_groups/tasks/create.yml | 2 +- .../ntnx_service_groups/tasks/update.yml | 2 +- .../targets/ntnx_service_groups_info/aliases | 2 +- .../targets/ntnx_static_routes/aliases | 2 +- .../ntnx_static_routes/tasks/create.yml | 4 +- .../targets/ntnx_static_routes_info/aliases | 2 +- .../ntnx_static_routes_info/tasks/info.yml | 2 +- .../targets/ntnx_user_groups/aliases | 2 +- .../targets/ntnx_user_groups/tasks/create.yml | 158 ++++- .../targets/ntnx_user_groups_info/aliases | 1 - .../ntnx_user_groups_info/meta/main.yml | 2 - .../ntnx_user_groups_info/tasks/main.yml | 9 - .../tasks/user_groups_info.yml | 100 --- tests/integration/targets/ntnx_users/aliases | 2 +- .../targets/ntnx_users/tasks/create.yml | 44 +- .../targets/ntnx_users_info/aliases | 2 +- .../targets/ntnx_vms_clone/aliases | 2 +- .../targets/nutanix_floating_ips/aliases | 2 +- .../targets/nutanix_floating_ips_info/aliases | 2 +- .../integration/targets/nutanix_pbrs/aliases | 2 +- .../targets/nutanix_pbrs_info/aliases | 2 +- .../targets/nutanix_subnets/aliases | 2 +- .../targets/nutanix_subnets_info/aliases | 2 +- tests/integration/targets/nutanix_vms/aliases | 2 +- .../targets/nutanix_vms_info/aliases | 2 +- .../integration/targets/nutanix_vpcs/aliases | 2 +- .../targets/nutanix_vpcs_info/aliases | 2 +- .../targets/prepare_env/tasks/cleanup.yml | 88 +-- .../targets/prepare_env/tasks/prepare_env.yml | 321 +++++----- 129 files changed, 2913 insertions(+), 664 deletions(-) create mode 100644 plugins/module_utils/karbon/node_pools.py create mode 100644 plugins/modules/ntnx_karbon_clusters_node_pools.py create mode 100644 plugins/modules/ntnx_ndb_tags_info.py create mode 100644 tests/integration/targets/ntnx_foundation_sanity/aliases create mode 100644 tests/integration/targets/ntnx_foundation_sanity/meta/main.yml create mode 100644 tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml create mode 100644 tests/integration/targets/ntnx_foundation_sanity/tasks/main.yml rename tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/{create_delete_and_info.yml => crud.yml} (57%) rename tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/{negativ_scenarios.yml => negative_scenarios.yml} (80%) create mode 100644 
tests/integration/targets/ntnx_ndb_databases_sanity/aliases create mode 100644 tests/integration/targets/ntnx_ndb_databases_sanity/meta/main.yml create mode 100644 tests/integration/targets/ntnx_ndb_databases_sanity/tasks/main.yml create mode 100644 tests/integration/targets/ntnx_ndb_databases_sanity/tasks/tests.yml rename tests/integration/targets/ntnx_projects/tasks/{advanced_projects.yml => projects_with_role_mappings.yml} (94%) delete mode 100644 tests/integration/targets/ntnx_user_groups_info/aliases delete mode 100644 tests/integration/targets/ntnx_user_groups_info/meta/main.yml delete mode 100644 tests/integration/targets/ntnx_user_groups_info/tasks/main.yml delete mode 100644 tests/integration/targets/ntnx_user_groups_info/tasks/user_groups_info.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index 687147663..e00212444 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,27 @@ -## v1.8.0 (28 Feb 2022) +## v1.9.0 (11 July 2023) + + +**Improvements:** + +- ntnx_profiles_info - [Impr] Develop ansible module for getting available IPs for given network profiles in NDB [\#345](https://github.com/nutanix/nutanix.ansible/issues/345) +- ntnx_security_rules - [Imprv] Flow Network Security Multi-Tier support in Security Policy definition [\#319](https://github.com/nutanix/nutanix.ansible/issues/319) + +**Bugs:** + +- info modules - [Bug] Multiple filters params are not considered for fetching entities in PC based info modules [[\#352](https://github.com/nutanix/nutanix.ansible/issues/352)] +- ntnx_foundation - [Bug] clusters parameters not being passed to Foundation Server in module nutanix.ncp.ntnx_foundation [[\#307](https://github.com/nutanix/nutanix.ansible/issues/307)] +- ntnx_karbon_clusters - [Bug] error in sample karbon/create_k8s_cluster.yml [[\#349](https://github.com/nutanix/nutanix.ansible/issues/349)] +- ntnx_karbon_clusters - [Bug] impossible to deploy NKE cluster with etcd using disk smaller than 120GB [[\#350](https://github.com/nutanix/nutanix.ansible/issues/350)] +- ntnx_subnets - [Bug] wrong virtual_switch selected in module ntnx_subnets [\#328](https://github.com/nutanix/nutanix.ansible/issues/328) + +**New Modules:** + +- ntnx_karbon_clusters_node_pools - Create, Update and Delete worker node pools with the provided configuration. +- ntnx_ndb_tags_info - info module for NDB tags + + + +## v1.8.0 (28 Feb 2023) **Features** diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a563534d7..c6d112a4b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,35 @@ Nutanix.Ncp Release Notes .. contents:: Topics +v1.9.0 +====== + +Minor Changes +------------- + +- ntnx_profiles_info - [Impr] Develop ansible module for getting available IPs for given network profiles in NDB [\#345](https://github.com/nutanix/nutanix.ansible/issues/345) +- ntnx_security_rules - [Imprv] Flow Network Security Multi-Tier support in Security Policy definition [\#319](https://github.com/nutanix/nutanix.ansible/issues/319) + +Deprecated Features +------------------- + +- ntnx_security_rules - The ``apptier`` option in target group has been removed. New option called ``apptiers`` has been added to support multi tier policy. 
+ +Bugfixes +-------- + +- info modules - [Bug] Multiple filters params are not considered for fetching entities in PC based info modules [[\#352](https://github.com/nutanix/nutanix.ansible/issues/352)] +- ntnx_foundation - [Bug] clusters parameters not being passed to Foundation Server in module nutanix.ncp.ntnx_foundation [[\#307](https://github.com/nutanix/nutanix.ansible/issues/307)] +- ntnx_karbon_clusters - [Bug] error in sample karbon/create_k8s_cluster.yml [[\#349](https://github.com/nutanix/nutanix.ansible/issues/349)] +- ntnx_karbon_clusters - [Bug] impossible to deploy NKE cluster with etcd using disk smaller than 120GB [[\#350](https://github.com/nutanix/nutanix.ansible/issues/350)] +- ntnx_subnets - [Bug] wrong virtual_switch selected in module ntnx_subnets [\#328](https://github.com/nutanix/nutanix.ansible/issues/328) + +New Modules +----------- + +- ntnx_karbon_clusters_node_pools - Create, Update and Delete worker node pools with the provided configuration. +- ntnx_ndb_tags_info - info module for NDB tags + v1.8.0 ====== diff --git a/README.md b/README.md index ea8f44acd..3e59c706a 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,10 @@ It is designed keeping simplicity as the core value. Hence it is Checkout this [blog](https://www.nutanix.dev/2022/08/05/getting-started-with-the-nutanix-ansible-module/) for getting started with nutanix ansible module. +## Support + +Ansible Nutanix Provider leverages the community-supported model. See [Open Source Support](https://portal.nutanix.com/page/documents/kbs/details?targetId=kA07V000000LdWPSA0) for more information about its support policy. + # Version compatibility ## Ansible @@ -33,7 +37,10 @@ This collection requires Python 2.7 or greater > For the 1.7.0 release of the ansible plugin it will have N-2 compatibility with the Prism Central APIs. This release was tested against Prism Central versions pc.2022.6, pc.2022.4 and pc2022.1.0.2. -> For the 1.8.0-beta.1 release of the ansible plugin it will have N compatibility with the Prism Central APIs. This release was tested against Prism Central version pc.2022.6 . +> For the 1.8.0 release of the ansible plugin it will have N compatibility with the Prism Central APIs. This release was tested against Prism Central version pc.2022.6. + +> For the 1.9.0 release of the ansible plugin it will have N-1 compatibility with the Prism Central APIs. This release was tested against Prism Central versions pc.2023.1 and pc.2023.1.0.1. + + ### Notes: 1. Static routes module (ntnx_static_routes) is supported for PC versions >= pc.2022.1 @@ -41,6 +48,8 @@ This collection requires Python 2.7 or greater 3. For Users and User Groups modules (ntnx_users and ntnx_user_groups), adding Identity Provider (IdP) & Organizational Unit (OU) based users/groups are supported for PC versions >= pc.2022.1 +4. ntnx_security_rules - The ``apptier`` option in target group has been removed. New option called ``apptiers`` has been added to support multi tier policy. + Prism Central based examples: https://github.com/nutanix/nutanix.ansible/tree/main/examples/ ## Foundation @@ -56,12 +65,15 @@ Foundation Central based examples : https://github.com/nutanix/nutanix.ansible/t ## Karbon > For the 1.6.0 release of the ansible plugin, it will have N-2 compatibility with the Karbon. 
This release was tested against Karbon versions v2.3.0, v2.4.0 and v2.5.0 +> For the 1.9.0 release of the ansible plugin, it was tested against Karbon versions v2.6.0, v2.7.0 and v2.8.0 + Karbon based examples : https://github.com/nutanix/nutanix.ansible/tree/main/examples/karbon ## Nutanix Database Service (ERA) -> For the 1.8.0-beta.1 release of the ansible plugin, it will have N-1 compatibility with the Nutanix Database Service (ERA). This release was tested against era versions v2.4.1 and v2.4.0 -> For the 1.8.0 release of the ansible plugin, it will have N-1 compatibility with the Nutanix Database Service (ERA). This release was tested against era versions v2.5.0 and v2.5.1 +> For the 1.8.0 release of the ansible plugin, it will have N-1 compatibility with the Nutanix Database Service (ERA). This release was tested against NDB versions v2.5.0 and v2.5.1 + +> For the 1.9.0 release of the ansible plugin, it was tested against NDB version v2.5.0.2 + NDB based examples : https://github.com/nutanix/nutanix.ansible/tree/main/examples/ndb @@ -150,6 +162,7 @@ ansible-playbook examples/iaas/iaas.yml | ntnx_image_placement_policies_info | List existing image placement policies. | | ntnx_karbon_clusters | Create, Delete k8s clusters | | ntnx_karbon_clusters_info | Get clusters info. | +| ntnx_karbon_clusters_node_pools | Create, update and delete node pools of a kubernetes cluster | | ntnx_karbon_registries | Create, Delete a karbon private registry entry | | ntnx_karbon_registries_info | Get karbon private registry registry info. | | ntnx_pbrs | Create or delete a PBR. | @@ -211,6 +224,7 @@ ansible-playbook examples/iaas/iaas.yml | ntnx_ndb_stretched_vlans | Get stretched vlans inf in NDB | | ntnx_ndb_time_machine_clusters | Manage clusters in NDB time machines | | ntnx_ndb_tags | Create, update and delete tags | +| ntnx_ndb_tags_info | Get tags info | | ntnx_ndb_database_clones | Create, update and delete database clones | | ntnx_ndb_database_snapshots | Create, update and delete database snapshots | | ntnx_ndb_database_clone_refresh | Perform database clone refresh | diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index d8e442afb..628bfed57 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -387,3 +387,33 @@ releases: name: ntnx_ndb_time_machines_info namespace: '' release_date: '2022-10-20' + 1.9.0: + changes: + bugfixes: + - info modules - [Bug] Multiple filters params are not considered for fetching + entities in PC based info modules [[\#352](https://github.com/nutanix/nutanix.ansible/issues/352)] + - ntnx_foundation - [Bug] clusters parameters not being passed to Foundation + Server in module nutanix.ncp.ntnx_foundation [[\#307](https://github.com/nutanix/nutanix.ansible/issues/307)] + - ntnx_karbon_clusters - [Bug] error in sample karbon/create_k8s_cluster.yml + [[\#349](https://github.com/nutanix/nutanix.ansible/issues/349)] + - ntnx_karbon_clusters - [Bug] impossible to deploy NKE cluster with etcd using + disk smaller than 120GB [[\#350](https://github.com/nutanix/nutanix.ansible/issues/350)] + - ntnx_subnets - [Bug] wrong virtual_switch selected in module ntnx_subnets + [\#328](https://github.com/nutanix/nutanix.ansible/issues/328) + deprecated_features: + - ntnx_security_rules - The ``apptier`` option in target group has been removed. + New option called ``apptiers`` has been added to support multi tier policy. 
+ minor_changes: + - ntnx_profiles_info - [Impr] Develop ansible module for getting available IPs + for given network profiles in NDB [\#345](https://github.com/nutanix/nutanix.ansible/issues/345) + - ntnx_security_rules - [Imprv] Flow Network Security Multi-Tier support in + Security Policy definition [\#319](https://github.com/nutanix/nutanix.ansible/issues/319) + modules: + - description: Create, Update and Delete worker node pools with the provided + configuration. + name: ntnx_karbon_clusters_node_pools + namespace: '' + - description: info module for NDB tags + name: ntnx_ndb_tags_info + namespace: '' + release_date: '2023-07-11' diff --git a/examples/karbon/create_k8s_cluster.yml b/examples/karbon/create_k8s_cluster.yml index 08c919fe3..e4740b2d0 100644 --- a/examples/karbon/create_k8s_cluster.yml +++ b/examples/karbon/create_k8s_cluster.yml @@ -20,7 +20,9 @@ name: uuid: storage_class: - name: + name: + storage_container: + name: cni: node_cidr_mask_size: 24 service_ipv4_cidr: "172.19.0.0/16" @@ -53,9 +55,9 @@ nutanix_cluster_password: "{{nutanix_cluster_password}}" nutanix_cluster_username: "{{nutanix_cluster_username}}" default_storage_class: True - name: test-storage-class + name: "{{storage_class.name}}" reclaim_policy: Delete - storage_container: "{{storage_class.name}}" + storage_container: "{{storage_container.name}}" file_system: ext4 flash_mode: False register: result @@ -100,13 +102,42 @@ nutanix_cluster_password: "{{nutanix_cluster_password}}" nutanix_cluster_username: "{{nutanix_cluster_username}}" default_storage_class: True - name: test-storage-class + name: "{{storage_class.name}}" reclaim_policy: Retain - storage_container: "{{storage_class.name}}" + storage_container: "{{storage_container.name}}" file_system: xfs flash_mode: true register: result + + - name: Create worker node pool with subnet uuid + ntnx_karbon_clusters_node_pools: + node_subnet: + uuid: "" + node_pool_name: "{{karbon_name}}" + cluster_name: "{{cluster.name}}" + pool_config: + num_instances: 2 + cpu: 4 + memory_gb: 8 + disk_gb: 120 + register: result + ignore_errors: true + + - name: update pool by increasing cpu,memory_gb,num_instances and add labels + ntnx_karbon_clusters_node_pools: + wait: True + node_pool_name: "{{karbon_name}}" + cluster_name: "{{cluster.name}}" + pool_config: + cpu: 6 + memory_gb: 10 + disk_gb: 150 + num_instances: 4 + add_labels: + property1: "test-property1" + register: result + ignore_errors: true + - name: create prod cluster ntnx_karbon_clusters: cluster: @@ -126,7 +157,7 @@ nutanix_cluster_password: "{{nutanix_cluster_password}}" nutanix_cluster_username: "{{nutanix_cluster_username}}" default_storage_class: True - name: "{{storage_class.name}}" reclaim_policy: Delete storage_container: "{{storage_container.name}}" file_system: ext4 diff --git a/galaxy.yml b/galaxy.yml index 70d59ebe3..2ce0b0240 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: "nutanix" name: "ncp" -version: "1.8.0" +version: "1.9.0" readme: "README.md" authors: - "Abhishek Chaudhary (@abhimutant)" diff --git a/meta/runtime.yml b/meta/runtime.yml index 4ab2374cd..3a98b9fba 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -80,6 +80,7 @@ action_groups: - ntnx_ndb_stretched_vlans - ntnx_ndb_time_machine_clusters - ntnx_ndb_tags + - ntnx_ndb_tags_info - ntnx_ndb_database_clones - ntnx_ndb_database_snapshots - ntnx_ndb_database_clone_refresh @@ -95,3 +96,4 @@ - ntnx_ndb_maintenance_window - ntnx_ndb_maintenance_windows_info - 
ntnx_ndb_slas + - ntnx_karbon_clusters_node_pools diff --git a/plugins/module_utils/entity.py b/plugins/module_utils/entity.py index a46672f34..a1dc79461 100644 --- a/plugins/module_utils/entity.py +++ b/plugins/module_utils/entity.py @@ -471,7 +471,7 @@ def unify_spec(self, spec1, spec2): @staticmethod def _parse_filters(filters): - return ",".join(map(lambda i: "{0}=={1}".format(i[0], i[1]), filters.items())) + return ";".join(map(lambda i: "{0}=={1}".format(i[0], i[1]), filters.items())) @staticmethod def _filter_entities(entities, custom_filters): diff --git a/plugins/module_utils/foundation/image_nodes.py b/plugins/module_utils/foundation/image_nodes.py index 41632d5ee..4e11760fb 100644 --- a/plugins/module_utils/foundation/image_nodes.py +++ b/plugins/module_utils/foundation/image_nodes.py @@ -111,33 +111,36 @@ def _build_spec_blocks(self, payload, blocks): payload["blocks"] = _blocks return payload, None - def _build_spec_cluster(self, payload, param): - clusters = [] - for cluster in param: + def _build_spec_cluster(self, payload, clusters): + cluster_specs = [] + for cluster in clusters: cluster_spec = self._get_default_cluster_spec(cluster) cluster_spec["cluster_name"] = cluster.get("name") cluster_spec["cluster_external_ip"] = cluster.get("cvm_vip", None) - if cluster_spec.get("cvm_ntp_servers"): + if cluster.get("cvm_ntp_servers"): cluster_spec["cvm_ntp_servers"] = self._list2str( cluster.get("cvm_ntp_servers") ) - if cluster_spec.get("cvm_dns_servers"): + if cluster.get("cvm_dns_servers"): cluster_spec["cvm_dns_servers"] = self._list2str( cluster.get("cvm_dns_servers") ) - if cluster_spec.get("hypervisor_ntp_servers"): + if cluster.get("hypervisor_ntp_servers"): cluster_spec["hypervisor_ntp_servers"] = self._list2str( cluster.get("hypervisor_ntp_servers") ) + if cluster.get("timezone"): + cluster_spec["timezone"] = cluster.get("timezone") + cluster_spec["cluster_members"] = cluster.get("cluster_members") - if len(cluster_spec["cluster_members"]) == 1: + if len(cluster["cluster_members"]) == 1: cluster_spec["single_node_cluster"] = True - clusters.append(cluster_spec) - payload["clusters"] = clusters + cluster_specs.append(cluster_spec) + payload["clusters"] = cluster_specs return payload, None def _build_spec_hypervisor_iso(self, payload, value): diff --git a/plugins/module_utils/karbon/clusters.py b/plugins/module_utils/karbon/clusters.py index 8f938b6a4..e244ca9cf 100644 --- a/plugins/module_utils/karbon/clusters.py +++ b/plugins/module_utils/karbon/clusters.py @@ -159,6 +159,7 @@ def validate_resources(resources, resource_type): min_cpu = 4 min_memory = 8 min_disk_size = 120 + min_etcd_disk_size = 40 err = "{0} cannot be less then {1}" if ( resource_type == "master" @@ -176,6 +177,10 @@ def validate_resources(resources, resource_type): return None, err.format("cpu", min_cpu) if resources["memory_gb"] < min_memory: return None, err.format("memory_gb", min_memory) - if resources["disk_gb"] < min_disk_size: - return None, err.format("disk_gb", min_disk_size) + if resource_type == "etcd": + if resources["disk_gb"] < min_etcd_disk_size: + return None, err.format("disk_gb", min_etcd_disk_size) + else: + if resources["disk_gb"] < min_disk_size: + return None, err.format("disk_gb", min_disk_size) return resources, None diff --git a/plugins/module_utils/karbon/node_pools.py b/plugins/module_utils/karbon/node_pools.py new file mode 100644 index 000000000..1980f8c2a --- /dev/null +++ b/plugins/module_utils/karbon/node_pools.py @@ -0,0 +1,176 @@ +# This file is part of Ansible +# GNU 
General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from copy import deepcopy + +from ..prism.subnets import get_subnet_uuid +from .clusters import Cluster + + +class NodePool(Cluster): + kind = "cluster" + + def __init__(self, module, resource_type="/v1-alpha.1/k8s/clusters"): + super(NodePool, self).__init__(module, resource_type=resource_type) + self.build_spec_methods = {} + + def _get_default_pool_spec(self): + return deepcopy( + { + "name": "", + "num_instances": 0, + "ahv_config": { + "cpu": 0, + "disk_mib": 0, + "memory_mib": 0, + "network_uuid": "", + "iscsi_network_uuid": "", + }, + } + ) + + def _build_pool_spec(self, payload, config): + payload["name"] = config["node_pool_name"] + if config.get("pool_config"): + payload["num_instances"] = config["pool_config"]["num_instances"] + payload["ahv_config"]["cpu"] = config["pool_config"]["cpu"] + payload["ahv_config"]["memory_mib"] = ( + config["pool_config"]["memory_gb"] * 1024 + ) + payload["ahv_config"]["disk_mib"] = config["pool_config"]["disk_gb"] * 1024 + subnet_uuid, err = get_subnet_uuid(config["node_subnet"], self.module) + if err: + return None, err + payload["ahv_config"]["network_uuid"] = subnet_uuid + if config.get("node_iscsi_subnet"): + iscsi_subnet_uuid, err = get_subnet_uuid( + config["node_iscsi_subnet"], self.module + ) + if err: + return None, err + payload["ahv_config"]["iscsi_network_uuid"] = iscsi_subnet_uuid + + return payload, None + + def get_pool_spec(self): + default_pool_spec = self._get_default_pool_spec() + error = self.validate_pool_resources() + if error: + return None, error + spec, error = self._build_pool_spec(default_pool_spec, self.module.params) + if error: + return None, error + return spec, None + + def get_labels_spec(self): + return deepcopy( + { + "add_labels": self.module.params.get("add_labels"), + "remove_labels": self.module.params.get("remove_labels"), + } + ) + + def add_node_pool(self, cluster_name, data=None): + + endpoint = "add-node-pool" + resp = self.update( + data=data, + uuid=cluster_name, + method="POST", + endpoint=endpoint, + ) + return resp + + def get_nodes_count(self, cluster_name, pool_name): + nodes_count = 0 + pool = self.get_node_pool(cluster_name, pool_name) + if pool: + nodes_count = len(pool.get("nodes")) + return nodes_count + + def remove_pool_nodes(self, cluster_name, pool_name): + nodes_count = self.get_nodes_count(cluster_name, pool_name) + resp = {} + if nodes_count: + spec = {"count": nodes_count} + endpoint = "node-pools/{0}/remove-nodes".format(pool_name) + resp = self.update( + data=spec, uuid=cluster_name, endpoint=endpoint, method="POST" + ) + return resp + + def remove_node_pool(self, cluster_name, pool_name): + + endpoint = "node-pools/{0}".format(pool_name) + resp = self.delete( + uuid=cluster_name, + endpoint=endpoint, + ) + return resp + + def read_node_pools(self, cluster_name): + + endpoint = "node-pools" + resp = self.read( + uuid=cluster_name, + endpoint=endpoint, + ) + return resp + + def get_node_pool(self, cluster_name, pool_name): + node_pools = self.read_node_pools(cluster_name) + for pool in node_pools: + if pool.get("name") == pool_name: + return pool + return None + + def update_nodes_count(self, cluster_name, pool_name, actual_count, expected_count): + residual_count = expected_count - actual_count + spec = {"count": abs(residual_count)} + if residual_count > 0: + resp = self.add_node(cluster_name, pool_name, 
spec) + else: + resp = self.remove_node(cluster_name, pool_name, spec) + return resp + + def add_node(self, cluster_name, pool_name, data=None): + + endpoint = "node-pools/{0}/add-nodes".format(pool_name) + resp = self.update( + data=data, + uuid=cluster_name, + endpoint=endpoint, + method="POST", + ) + return resp + + def remove_node(self, cluster_name, pool_name, data=None): + + endpoint = "node-pools/{0}/remove-nodes".format(pool_name) + resp = self.update( + data=data, + uuid=cluster_name, + endpoint=endpoint, + method="POST", + ) + return resp + + def update_labels(self, cluster_name, pool_name, data=None, raise_error=True): + + endpoint = "node-pools/{0}/update-labels".format(pool_name) + resp = self.update( + data=data, + uuid=cluster_name, + endpoint=endpoint, + method="POST", + raise_error=raise_error, + ) + return resp + + def validate_pool_resources(self): + if not self.module.params.get("node_subnet"): + return "missing required arguments: node_subnet" + return None diff --git a/plugins/module_utils/ndb/profiles/profile_types.py b/plugins/module_utils/ndb/profiles/profile_types.py index 19cccbc4d..1aadd8e54 100644 --- a/plugins/module_utils/ndb/profiles/profile_types.py +++ b/plugins/module_utils/ndb/profiles/profile_types.py @@ -96,6 +96,11 @@ def __init__(self, module): super(NetworkProfile, self).__init__(module) self._type = NDB.ProfileTypes.NETWORK + def get_available_ips(self, uuid): + endpoint = "{0}/get-available-ips".format(uuid) + resp = self.read(endpoint=endpoint) + return resp + def get_default_version_update_spec(self, override_spec=None): spec = { "name": "", diff --git a/plugins/module_utils/prism/groups.py b/plugins/module_utils/prism/groups.py index b259b7f68..b0c2cad3d 100644 --- a/plugins/module_utils/prism/groups.py +++ b/plugins/module_utils/prism/groups.py @@ -14,17 +14,19 @@ def __init__(self, module): def get_uuid( self, - value, + value="", key="name", data=None, entity_type="", raise_error=True, no_response=False, ): - data = { - "entity_type": entity_type, - "filter_criteria": "{0}=={1}".format(key, value), - } + if not data: + data = { + "entity_type": entity_type, + "filter_criteria": "{0}=={1}".format(key, value), + } + resp = self.list( data, use_base_url=True, raise_error=raise_error, no_response=no_response ) diff --git a/plugins/module_utils/prism/security_rules.py b/plugins/module_utils/prism/security_rules.py index c8a7d391a..779c2570c 100644 --- a/plugins/module_utils/prism/security_rules.py +++ b/plugins/module_utils/prism/security_rules.py @@ -119,8 +119,8 @@ def _build_spec_rule(self, payload, value): ] if categories.get("apptype"): params["AppType"] = [categories["apptype"]] - if categories.get("apptier"): - params["AppTier"] = [categories.get("apptier")] + if categories.get("apptiers"): + params["AppTier"] = categories.get("apptiers") if value["target_group"].get("default_internal_policy"): target_group["default_internal_policy"] = value["target_group"][ "default_internal_policy" diff --git a/plugins/module_utils/prism/subnets.py b/plugins/module_utils/prism/subnets.py index 4b501d9c6..1eee1e030 100644 --- a/plugins/module_utils/prism/subnets.py +++ b/plugins/module_utils/prism/subnets.py @@ -46,15 +46,18 @@ def _build_spec_vlan_subnet(self, payload, config): payload["spec"]["resources"]["vlan_id"] = config["vlan_id"] payload["spec"]["resources"]["is_external"] = False - dvs_uuid, error = get_dvs_uuid(config["virtual_switch"], self.module) - if error: - return None, error - payload["spec"]["resources"]["virtual_switch_uuid"] = dvs_uuid 
cluster_uuid, error = get_cluster_uuid(config["cluster"], self.module) if error: return None, error payload["spec"]["cluster_reference"] = self._get_cluster_ref_spec(cluster_uuid) + dvs_uuid, error = get_dvs_uuid( + config["virtual_switch"], self.module, cluster_uuid=cluster_uuid + ) + if error: + return None, error + payload["spec"]["resources"]["virtual_switch_uuid"] = dvs_uuid + if "ipam" in config: payload["spec"]["resources"]["ip_config"] = self._get_ipam_spec(config) diff --git a/plugins/module_utils/prism/virtual_switches.py b/plugins/module_utils/prism/virtual_switches.py index 7012bf424..4f6b1f7be 100644 --- a/plugins/module_utils/prism/virtual_switches.py +++ b/plugins/module_utils/prism/virtual_switches.py @@ -4,16 +4,26 @@ __metaclass__ = type +from ..utils import create_filter_criteria_string from .groups import Groups # Helper functions -def get_dvs_uuid(config, module): +def get_dvs_uuid(config, module, cluster_uuid=None): if "name" in config: groups = Groups(module) name = config["name"] - uuid = groups.get_uuid(value=name, entity_type="distributed_virtual_switch") + filters = { + "name": name, + "cluster_configuration_list.cluster_uuid": cluster_uuid, + } + + data = { + "entity_type": "distributed_virtual_switch", + "filter_criteria": create_filter_criteria_string(filters), + } + uuid = groups.get_uuid(data=data) if not uuid: error = "Virtual Switch {0} not found.".format(name) return None, error diff --git a/plugins/module_utils/utils.py b/plugins/module_utils/utils.py index 0871df63e..37eb139d7 100644 --- a/plugins/module_utils/utils.py +++ b/plugins/module_utils/utils.py @@ -109,3 +109,22 @@ def format_filters_map(filters, except_keys=None): mapped_filters.update({key: value}) filters = mapped_filters return filters + + +def create_filter_criteria_string(filters): + """ + This method creates filter criteria string as per filters map for v3 apis + example filter criteria format: "name==test_name;cluster_uuid==test_uuid" + """ + filter_criteria = "" + if not filters: + return filter_criteria + + for key, val in filters.items(): + if val: + filter_criteria = filter_criteria + "{0}=={1};".format(key, val) + + # remove trailing ";" + filter_criteria = filter_criteria[:-1] + + return filter_criteria diff --git a/plugins/modules/ntnx_karbon_clusters.py b/plugins/modules/ntnx_karbon_clusters.py index da91925f7..aeb1b64ec 100644 --- a/plugins/modules/ntnx_karbon_clusters.py +++ b/plugins/modules/ntnx_karbon_clusters.py @@ -86,7 +86,7 @@ default: 4 disk_gb: type: int - description: Size of local storage for each VM on the PE cluster in GiB. + description: Size of local storage for each VM on the PE cluster in GiB. The minimum size is 40 GiB. default: 120 memory_gb: type: int diff --git a/plugins/modules/ntnx_karbon_clusters_node_pools.py b/plugins/modules/ntnx_karbon_clusters_node_pools.py new file mode 100644 index 000000000..bb51d6ba3 --- /dev/null +++ b/plugins/modules/ntnx_karbon_clusters_node_pools.py @@ -0,0 +1,363 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_karbon_clusters_node_pools +short_description: Create, Update and Delete worker node pools with the provided configuration. 
+version_added: 1.9.0 +description: "Create, update and delete worker node pools" +options: + cluster_name: + type: str + description: Unique name of the k8s cluster. + required: true + node_pool_name: + type: str + description: + - Unique name of the k8s cluster's node pool. + - We can create, update or delete by using the name + required: true + node_subnet: + type: dict + description: Subnet that the nodes in the node pool belong to + suboptions: + name: + type: str + description: + - Subnet name + - Mutually exclusive with C(uuid) + uuid: + type: str + description: + - Subnet UUID + - Mutually exclusive with C(name) + node_iscsi_subnet: + type: dict + description: + - iSCSI subnet that the nodes in the node pool belong to + suboptions: + name: + type: str + description: + - Subnet name + - Mutually exclusive with C(uuid) + uuid: + type: str + description: + - Subnet UUID + - Mutually exclusive with C(name) + pool_config: + type: dict + description: + - Configuration of the node pools that the workers belong to. + - The worker nodes require a minimum of 8,192 MiB memory and 122,880 MiB disk space. + - disk space >= 122880 MiB + - memory >= 8192 MiB + suboptions: + num_instances: + type: int + default: 1 + description: Number of nodes in the node pool. + cpu: + type: int + description: The number of VCPUs allocated for each VM on the PE cluster. + default: 4 + disk_gb: + type: int + description: Size of local storage for each VM on the PE cluster in GiB. + default: 120 + memory_gb: + type: int + description: Memory allocated for each VM on the PE cluster in GiB. + default: 8 + add_labels: + type: dict + description: Map of user-provided labels for the nodes in the node pool. + remove_labels: + type: list + description: List of user-provided label keys to remove from the nodes in the node pool. 
+ elements: str +extends_documentation_fragment: + - nutanix.ncp.ntnx_credentials + - nutanix.ncp.ntnx_operations +author: + - Prem Karat (@premkarat) + - Gevorg Khachatryan (@Gevorg-Khachatryan-97) + - Alaa Bishtawi (@alaa-bish) +""" + +EXAMPLES = r""" +- name: Create node pool with subnet uuid + ntnx_karbon_clusters_node_pools: + node_subnet: + uuid: "" + node_pool_name: "node_pool_name" + cluster_name: "cluster_name" + pool_config: + num_instances: 2 + cpu: 4 + memory_gb: 8 + disk_gb: 120 + register: result + ignore_errors: true + +- name: update pool by increasing cpu,memory_gb,num_instances and add labels + ntnx_karbon_clusters_node_pools: + wait: True + node_pool_name: "node_name" + cluster_name: "cluster_name" + pool_config: + cpu: 6 + memory_gb: 10 + disk_gb: 150 + num_instances: 4 + add_labels: + property1: "test-property1" + register: result + ignore_errors: true +""" + +RETURN = r""" +response: + description: List of node worker pools + returned: always + type: dict + sample: { + "ahv_config": { + "cpu": 8, + "disk_mib": 122880, + "memory_mib": 8192, + "network_name": "", + "network_uuid": "", + "prism_element_cluster_uuid": "" + }, + "assigned_gpu_config_list": [], + "category": "worker", + "default": true, + "labels": { + "nke-default": "true" + }, + "name": "test-module21-worker-pool", + "node_os_version": "ntnx-1.5", + "nodes": [ + { + "hostname": "test-module21-b5fe00-worker-0", + "ipv4_address": "" + } + ], + "num_instances": 1 + } +cluster_name: + description: kubernetes cluster name + returned: always + type: str + sample: "test_cluster" +node_pool_name: + description: worker node pool name + returned: always + type: str + sample: "node_pool_A" +""" + +from ..module_utils import utils # noqa: E402 +from ..module_utils.base_module import BaseModule # noqa: E402 +from ..module_utils.karbon.node_pools import NodePool # noqa: E402 +from ..module_utils.prism.tasks import Task # noqa: E402 + + +def get_module_spec(): + mutually_exclusive = [("name", "uuid")] + + entity_by_spec = dict(name=dict(type="str"), uuid=dict(type="str")) + + resource_spec = dict( + num_instances=dict(type="int", default=1), + cpu=dict(type="int", default=4), + memory_gb=dict(type="int", default=8), + disk_gb=dict(type="int", default=120), + ) + + module_args = dict( + cluster_name=dict(type="str", required=True), + node_pool_name=dict(type="str", required=True), + pool_config=dict(type="dict", apply_defaults=True, options=resource_spec), + node_subnet=dict( + type="dict", options=entity_by_spec, mutually_exclusive=mutually_exclusive + ), + node_iscsi_subnet=dict( + type="dict", options=entity_by_spec, mutually_exclusive=mutually_exclusive + ), + add_labels=dict(type="dict"), + remove_labels=dict(type="list", elements="str"), + ) + + return module_args + + +def create_pool(module, result): + node_pool = NodePool(module) + cluster_name = module.params["cluster_name"] + pool_name = module.params["node_pool_name"] + + pool = node_pool.get_node_pool(cluster_name, pool_name) + if pool: + update_pool(module, result, pool) + return + + spec, error = node_pool.get_pool_spec() + if error: + result["error"] = error + module.fail_json(msg="Failed generating create pool spec", **result) + + if module.check_mode: + result["response"] = spec + return + + resp = node_pool.add_node_pool(cluster_name, spec) + task_uuid = resp["task_uuid"] + result["cluster_name"] = cluster_name + result["node_pool_name"] = pool_name + result["changed"] = True + + if module.params.get("wait"): + task = Task(module) + 
task.wait_for_completion(task_uuid) + resp = node_pool.get_node_pool(cluster_name, pool_name) + + result["response"] = resp + + +def update_pool(module, result, pool=None): + node_pool = NodePool(module) + cluster_name = module.params["cluster_name"] + pool_name = module.params["node_pool_name"] + nodes_expected_count = module.params.get("pool_config", {}).get("num_instances") + nodes_actual_count = len(pool.get("nodes", [])) + add_labels = module.params.get("add_labels") + remove_labels = module.params.get("remove_labels") + wait = module.params.get("wait") + nothing_to_change = False + + if not (nodes_expected_count or add_labels or remove_labels): + result["error"] = ( + "Missing parameter in playbook. " + "One of attributes pool_config.num_instances|add_labels|remove_labels is required" + ) + module.fail_json(msg="Failed updating node pool", **result) + + # resize pool + if nodes_expected_count: + if nodes_expected_count != nodes_actual_count: + resp = node_pool.update_nodes_count( + cluster_name, pool_name, nodes_actual_count, nodes_expected_count + ) + task_uuid = resp.get("task_uuid") + result["nodes_update_response"] = resp + result["changed"] = True + if task_uuid and wait: + task = Task(module) + task.wait_for_completion(task_uuid) + else: + nothing_to_change = True + + # update labels + if add_labels or remove_labels: + labels_spec = node_pool.get_labels_spec() + if module.check_mode: + result["response"] = labels_spec + return + raise_error = False if result["changed"] else True + resp = node_pool.update_labels( + cluster_name, pool_name, labels_spec, raise_error + ) + result["labels_update_response"] = resp + task_uuid = resp.get("task_uuid") + + if task_uuid: + result["changed"] = True + if wait: + task = Task(module) + resp = task.wait_for_completion(task_uuid, raise_error) + state = resp.get("status") + if state == "FAILED": + result["skipped"] = True + result["error"] = resp.get("error_detail") + else: + result["skipped"] = True + else: + if nothing_to_change: + result["skipped"] = True + module.exit_json(msg="Nothing to change.") + + pool = node_pool.get_node_pool(cluster_name, pool_name) + result["response"] = pool + result["cluster_name"] = cluster_name + result["node_pool_name"] = pool_name + + +def delete_nodes_of_pool(module, result): + cluster_name = module.params["cluster_name"] + pool_name = module.params["node_pool_name"] + + node_pool = NodePool(module) + resp = node_pool.remove_pool_nodes(cluster_name, pool_name) + result["changed"] = True + task_uuid = resp.get("task_uuid") + + if task_uuid: + task = Task(module) + task.wait_for_completion(task_uuid) + + +def delete_pool(module, result): + cluster_name = module.params["cluster_name"] + pool_name = module.params["node_pool_name"] + + delete_nodes_of_pool(module, result) + + node_pool = NodePool(module, resource_type="/v1-beta.1/k8s/clusters") + resp = node_pool.remove_node_pool(cluster_name, pool_name) + + result["changed"] = True + result["cluster_name"] = cluster_name + result["node_pool_name"] = pool_name + task_uuid = resp["task_uuid"] + result["task_uuid"] = task_uuid + + if module.params.get("wait"): + task = Task(module) + resp = task.wait_for_completion(task_uuid) + + result["response"] = resp + + +def run_module(): + module = BaseModule( + argument_spec=get_module_spec(), + supports_check_mode=True, + required_if=[("state", "absent", ("cluster_name", "node_pool_name"))], + ) + utils.remove_param_with_none_value(module.params) + result = {"response": {}, "error": None, "changed": False} + state = 
module.params["state"] + if state == "present": + create_pool(module, result) + else: + delete_pool(module, result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_ndb_profiles_info.py b/plugins/modules/ntnx_ndb_profiles_info.py index cf08eb53a..f3a354dc3 100644 --- a/plugins/modules/ntnx_ndb_profiles_info.py +++ b/plugins/modules/ntnx_ndb_profiles_info.py @@ -46,6 +46,13 @@ - filter as per profile type type: str choices: ["Software","Compute","Network","Database_Parameter",] + include_available_ips: + description: + - include available ips for each subnet in response + - only to be used for network profiles having NDB managed subnets + - only to be used for fetching profile using C(name) or C(uuid) + default: false + type: bool extends_documentation_fragment: - nutanix.ncp.ntnx_ndb_info_base_module author: @@ -178,6 +185,7 @@ """ from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from ..module_utils.ndb.profiles.profile_types import NetworkProfile # noqa: E402 from ..module_utils.ndb.profiles.profiles import Profile # noqa: E402 from ..module_utils.utils import remove_param_with_none_value # noqa: E402 @@ -217,6 +225,7 @@ def get_module_spec(): type="dict", options=filters_spec, ), + include_available_ips=dict(type="bool", default=False), ) return module_args @@ -230,6 +239,11 @@ def get_profile(module, result): result["response"] = resp + if module.params.get("include_available_ips", False): + uuid = resp.get("id") + ntw_profile = NetworkProfile(module) + result["response"]["available_ips"] = ntw_profile.get_available_ips(uuid=uuid) + def get_profiles(module, result): profile = Profile(module) diff --git a/plugins/modules/ntnx_ndb_tags_info.py b/plugins/modules/ntnx_ndb_tags_info.py new file mode 100644 index 000000000..acedab100 --- /dev/null +++ b/plugins/modules/ntnx_ndb_tags_info.py @@ -0,0 +1,192 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Prem Karat +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" +--- +module: ntnx_ndb_tags_info +short_description: info module for ndb tags info +version_added: 1.9.0 +description: info module for ndb tags +options: + uuid: + description: + - tags uuid + type: str + name: + description: + - get tags based on name + - since there can be multiple tags with same name, + use C(entity_type) in C(filters) to get correct tag info + type: str + filters: + description: + - filters spec + type: dict + suboptions: + entity_type: + description: + - get all tags based on entity_type + type: str + choices: ["DATABASE","CLONE","TIME_MACHINE","DATABASE_SERVER",] +extends_documentation_fragment: + - nutanix.ncp.ntnx_ndb_info_base_module +author: + - Prem Karat (@premkarat) + - Pradeepsingh Bhati (@bhati-pradeep) +""" + +EXAMPLES = r""" +- name: get all tags + ntnx_ndb_tags_info: + register: result + +- name: get tag based on uuid + ntnx_ndb_tags_info: + uuid: "{{database_tag_uuid}}" + register: result + +- name: get all tags based on DATABASE entity type + ntnx_ndb_tags_info: + filters: + entity_type: "DATABASE" + register: result + +- name: get all tags based on CLONE entity type + ntnx_ndb_tags_info: + filters: + entity_type: "CLONE" + register: result + +- name: get tag based on DATABASE entity type and name + ntnx_ndb_tags_info: + filters: + entity_type: 
"DATABASE" + name: "tag_name" + register: result +""" + +RETURN = r""" +response: + description: listing all tags + returned: only when name and uuid is not used for fetching specific tag + type: list + sample: [ + { + "dateCreated": "2023-02-24 08:14:05", + "dateModified": "2023-02-24 08:14:05", + "entityType": "DATABASE", + "id": "aba66aab-e73e-40c3-92bc-fabd2b449475", + "name": "ansible-databases", + "owner": "eac70dbf-22fb-462b-9498-949796ca1f73", + "required": false, + "status": "ENABLED", + "values": 2 + }, + { + "dateCreated": "2023-02-24 08:13:48", + "dateModified": "2023-02-24 08:13:48", + "entityType": "CLONE", + "id": "ca15bae2-cf47-4b64-829f-30a1f6f55b62", + "name": "ansible-clones", + "owner": "eac70dbf-22fb-462b-9498-949796ca1f73", + "required": false, + "status": "ENABLED", + "values": 0 + }, + ] +""" + +from ..module_utils.ndb.base_info_module import NdbBaseInfoModule # noqa: E402 +from ..module_utils.ndb.tags import Tag # noqa: E402 + + +def get_module_spec(): + + filters_spec = dict( + entity_type=dict( + type="str", + choices=[ + "DATABASE", + "TIME_MACHINE", + "CLONE", + "DATABASE_SERVER", + ], + ), + ) + + module_args = dict( + uuid=dict(type="str"), + name=dict(type="str"), + filters=dict( + type="dict", + options=filters_spec, + ), + ) + + return module_args + + +def format_tags_filters(filters): + """ + This routine returns filter spec with attribute name changes as acceptable by api + """ + attrs = {"entity_type": "entityType"} + + updated_filters = {} + for key in filters.keys(): + if attrs.get(key): + updated_filters[attrs.get(key)] = filters[key] + return updated_filters + + +def get_tags(module, result): + tags = Tag(module) + filters = module.params.get("filters") + if filters: + filters = format_tags_filters(filters) + + # fetch tag using uuid + if module.params.get("uuid"): + resp = tags.read(uuid=module.params.get("uuid")) + + # fetch tag using name and entity type (optional) + elif module.params.get("name"): + entity_type = filters.get("entityType") + uuid, err = tags.get_tag_uuid( + name=module.params.get("name"), entity_type=entity_type + ) + if err: + result["error"] = err + return module.fail_json(msg="Failed fetching tag info", **result) + resp = tags.read(uuid=uuid) + + # fetch all tags + else: + resp = tags.read(query=filters) + + result["response"] = resp + + +def run_module(): + module = NdbBaseInfoModule( + argument_spec=get_module_spec(), + supports_check_mode=False, + mutually_exclusive=[("uuid", "filters")], + ) + result = {"changed": False, "error": None, "response": None} + get_tags(module, result) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/ntnx_security_rules.py b/plugins/modules/ntnx_security_rules.py index 6f712a21e..a6c92e5de 100644 --- a/plugins/modules/ntnx_security_rules.py +++ b/plugins/modules/ntnx_security_rules.py @@ -102,9 +102,12 @@ apptype_filter_by_category: description: A category key and value. type: dict - apptier: - description: A category value. - type: str + apptiers: + description: + - List of AppTier category values + - C(apptier) is deprecated + type: list + elements: str adgroup: description: - A category value. @@ -357,9 +360,12 @@ apptype_filter_by_category: description: A category key and value. type: dict - apptier: - description: A category value. - type: str + apptiers: + description: + - List of AppTier category values + - C(apptier) is deprecated + type: list + elements: str adgroup: description: - A category value. 
@@ -612,9 +618,12 @@ apptype_filter_by_category: description: A category key and value. type: dict - apptier: - description: A category value. - type: str + apptiers: + description: + - List of AppTier category values + - C(apptier) is deprecated + type: list + elements: str adgroup: description: - A category value. @@ -1105,7 +1114,7 @@ def get_module_spec(): categories_spec = dict( apptype=dict(type="str"), apptype_filter_by_category=dict(type="dict"), - apptier=dict(type="str"), + apptiers=dict(type="list", elements="str"), adgroup=dict(type="str"), ) diff --git a/tests/integration/targets/ntnx_acps/aliases b/tests/integration/targets/ntnx_acps/aliases index 87e7bdaae..e69de29bb 100644 --- a/tests/integration/targets/ntnx_acps/aliases +++ b/tests/integration/targets/ntnx_acps/aliases @@ -1 +0,0 @@ -disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_acps/tasks/create_acps.yml b/tests/integration/targets/ntnx_acps/tasks/create_acps.yml index 92ff0d84f..ea7478d17 100644 --- a/tests/integration/targets/ntnx_acps/tasks/create_acps.yml +++ b/tests/integration/targets/ntnx_acps/tasks/create_acps.yml @@ -181,7 +181,7 @@ - result.changed == true - result.msg == "All items completed" fail_msg: "unable to delete all created acp's" - success_msg: "All acp's deleted succesfully" + success_msg: "All acp's deleted successfully" - set_fact: todelete: [] \ No newline at end of file diff --git a/tests/integration/targets/ntnx_acps/tasks/update_acps.yml b/tests/integration/targets/ntnx_acps/tasks/update_acps.yml index 03ddc706e..d8678eaa4 100644 --- a/tests/integration/targets/ntnx_acps/tasks/update_acps.yml +++ b/tests/integration/targets/ntnx_acps/tasks/update_acps.yml @@ -44,7 +44,7 @@ - result.failed == false - "'Nothing to change' in result.msg" fail_msg: "Fail: ACP got updated" - success_msg: "Pass: ACP update skipped succesfully due to no changes in spec" + success_msg: "Pass: ACP update skipped successfully due to no changes in spec" ######################################################################################## @@ -118,7 +118,7 @@ - result.failed == false - "'Nothing to change' in result.msg" fail_msg: "Fail: ACP got updated" - success_msg: "Pass: ACP update skipped succesfully due to no changes in spec" + success_msg: "Pass: ACP update skipped successfully due to no changes in spec" ########################################### Cleanup ################################################### @@ -139,7 +139,7 @@ - result.changed == true - result.msg == "All items completed" fail_msg: "unable to delete all created acp's" - success_msg: "All acp's deleted succesfully" + success_msg: "All acp's deleted successfully" - set_fact: todelete: [] \ No newline at end of file diff --git a/tests/integration/targets/ntnx_acps_info/aliases b/tests/integration/targets/ntnx_acps_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_acps_info/aliases +++ b/tests/integration/targets/ntnx_acps_info/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_address_groups/aliases b/tests/integration/targets/ntnx_address_groups/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_address_groups/aliases +++ b/tests/integration/targets/ntnx_address_groups/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_address_groups_info/aliases b/tests/integration/targets/ntnx_address_groups_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_address_groups_info/aliases 
+++ b/tests/integration/targets/ntnx_address_groups_info/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_categories/aliases b/tests/integration/targets/ntnx_categories/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_categories/aliases +++ b/tests/integration/targets/ntnx_categories/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_categories/tasks/all_operations.yml b/tests/integration/targets/ntnx_categories/tasks/all_operations.yml index e8b0aa0b0..655505426 100644 --- a/tests/integration/targets/ntnx_categories/tasks/all_operations.yml +++ b/tests/integration/targets/ntnx_categories/tasks/all_operations.yml @@ -104,7 +104,7 @@ - result.response.category_values.entities|length == 1 - result.response.category_values.entities.0.value == "{{values.0}}" fail_msg: "Fail: unable to update existing category by deleting some values " - success_msg: "Passed: update existing category by deleting some values finished succesfully" + success_msg: "Passed: update existing category by deleting some values finished successfully" ################# - name: update existing category by deleting all values ntnx_categories: @@ -129,7 +129,7 @@ - result.response is defined - result.response.category_values.entities|length == 0 fail_msg: "Fail: unable to update existing category by deleting all values " - success_msg: "Passed: update existing category by deleting all values finished succesfully" + success_msg: "Passed: update existing category by deleting all values finished successfully" ################# - name: Delte the category ntnx_categories: @@ -151,7 +151,7 @@ - result.failed == false - result.response == {} fail_msg: "Fail: unable to Delete the category " - success_msg: "Passed: Delete the category finished succesfully" + success_msg: "Passed: Delete the category finished successfully" ################# - name: Create category key and value together with check_mode diff --git a/tests/integration/targets/ntnx_categories_info/aliases b/tests/integration/targets/ntnx_categories_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_categories_info/aliases +++ b/tests/integration/targets/ntnx_categories_info/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_clusters_info/aliases b/tests/integration/targets/ntnx_clusters_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_clusters_info/aliases +++ b/tests/integration/targets/ntnx_clusters_info/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml b/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml index 74071ab6b..1ad06894b 100644 --- a/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml +++ b/tests/integration/targets/ntnx_foundation_central_imaged_nodes_info/tasks/get_node_info.yml @@ -31,7 +31,7 @@ - result.response is defined - result.response.node_serial == nodes.response.imaged_nodes.0.node_serial fail_msg: "fail: unable to get node by uuid" - success_msg: "succes: get node by uuid succesfully " + success_msg: "success: get node by uuid successfully" - name: get imaged node using custom filter ntnx_foundation_central_imaged_nodes_info: diff --git a/tests/integration/targets/ntnx_foundation_sanity/aliases b/tests/integration/targets/ntnx_foundation_sanity/aliases new file mode 100644 index 000000000..e69de29bb diff --git 
a/tests/integration/targets/ntnx_foundation_sanity/meta/main.yml b/tests/integration/targets/ntnx_foundation_sanity/meta/main.yml new file mode 100644 index 000000000..8d7d13401 --- /dev/null +++ b/tests/integration/targets/ntnx_foundation_sanity/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_foundation_env diff --git a/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml b/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml new file mode 100644 index 000000000..49e4b651b --- /dev/null +++ b/tests/integration/targets/ntnx_foundation_sanity/tasks/image_nodes.yml @@ -0,0 +1,172 @@ +--- + - debug: + msg: start testing ntnx_foundation test for bare metal imaging and cluster creation + + + - name: get aos_packages_info from foundation + ntnx_foundation_aos_packages_info: + register: images + + - name: Create spec for imaging and creating cluster out of bare metal nodes + check_mode: yes + ntnx_foundation: + timeout: 4500 + cvm_gateway: "{{cvm_gateway}}" + cvm_netmask: "{{cvm_netmask}}" + hypervisor_gateway: "{{hypervisor_gateway}}" + hypervisor_netmask: "{{hypervisor_netmask}}" + default_ipmi_user: "{{default_ipmi_user}}" + current_cvm_vlan_tag: "{{nodes.current_cvm_vlan_tag}}" + nos_package: "{{images.aos_packages[0]}}" + blocks: + - block_id: "{{nodes.block_id}}" + nodes: + - manual_mode: + cvm_ip: "{{nodes.node1.cvm_ip}}" + cvm_gb_ram: 50 + hypervisor_hostname: "{{nodes.node1.hypervisor_hostname}}" + ipmi_netmask: "{{nodes.node1.ipmi_netmask}}" + ipmi_gateway: "{{nodes.node1.ipmi_gateway}}" + ipmi_ip: "{{nodes.node1.ipmi_ip}}" + ipmi_password: "{{nodes.node1.ipmi_password}}" + hypervisor: "{{nodes.node1.hypervisor}}" + hypervisor_ip: "{{nodes.node1.hypervisor_ip}}" + node_position: "{{nodes.node1.node_position}}" + clusters: + - redundancy_factor: 1 + cluster_members: + - "{{nodes.node1.cvm_ip}}" + name: "test-cluster" + timezone: "Asia/Calcutta" + cvm_ntp_servers: + - "{{nodes.ntp_servers[0]}}" + - "{{nodes.ntp_servers[1]}}" + cvm_dns_servers: + - "{{nodes.dns_servers[0]}}" + - "{{nodes.dns_servers[1]}}" + hypervisor_ntp_servers: + - "{{nodes.ntp_servers[0]}}" + - "{{nodes.ntp_servers[1]}}" + enable_ns: true + backplane_vlan: "{{nodes.backplane_vlan}}" + backplane_subnet: "{{nodes.backplane_subnet}}" + backplane_netmask: "{{nodes.backplane_netmask}}" + register: spec + ignore_errors: True + + - set_fact: + expected_spec: { + "blocks": [ + { + "block_id": "{{nodes.block_id}}", + "nodes": [ + { + "cvm_gb_ram": 50, + "cvm_ip": "{{nodes.node1.cvm_ip}}", + "hypervisor": "{{nodes.node1.hypervisor}}", + "hypervisor_hostname": "{{nodes.node1.hypervisor_hostname}}", + "hypervisor_ip": "{{nodes.node1.hypervisor_ip}}", + "image_now": true, + "ipmi_gateway": "{{nodes.node1.ipmi_gateway}}", + "ipmi_ip": "{{nodes.node1.ipmi_ip}}", + "ipmi_netmask": "{{nodes.node1.ipmi_netmask}}", + "ipmi_password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "node_position": "{{nodes.node1.node_position}}" + } + ] + } + ], + "clusters": [ + { + "backplane_netmask": "{{nodes.backplane_netmask}}", + "backplane_subnet": "{{nodes.backplane_subnet}}", + "backplane_vlan": "{{nodes.backplane_vlan}}", + "cluster_external_ip": null, + "cluster_init_now": true, + "cluster_members": [ + "{{nodes.node1.cvm_ip}}" + ], + "cluster_name": "test-cluster", + "cvm_dns_servers": "{{nodes.dns_servers[0]}},{{nodes.dns_servers[1]}}", + "cvm_ntp_servers": "{{nodes.ntp_servers[0]}},{{nodes.ntp_servers[1]}}", + "enable_ns": true, + "hypervisor_ntp_servers": 
"{{nodes.ntp_servers[0]}},{{nodes.ntp_servers[1]}}", + "redundancy_factor": 1, + "single_node_cluster": true, + "timezone": "Asia/Calcutta" + } + ], + "current_cvm_vlan_tag": "{{nodes.current_cvm_vlan_tag}}", + "cvm_gateway": "{{cvm_gateway}}", + "cvm_netmask": "{{cvm_netmask}}", + "hypervisor_gateway": "{{hypervisor_gateway}}", + "hypervisor_iso": {}, + "hypervisor_netmask": "{{hypervisor_netmask}}", + "ipmi_user": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "nos_package": "{{images.aos_packages[0]}}" + } + + - name: Verify spec + assert: + that: + - spec.response is defined + - spec.failed==false + - spec.changed==false + - spec.response == expected_spec + fail_msg: " Fail : unable to create spec for imaging nodes" + success_msg: "Succes: spec generated successfully" + + - name: Image nodes and create cluster out of it + ntnx_foundation: + timeout: 4500 + cvm_gateway: "{{cvm_gateway}}" + cvm_netmask: "{{cvm_netmask}}" + hypervisor_gateway: "{{hypervisor_gateway}}" + hypervisor_netmask: "{{hypervisor_netmask}}" + default_ipmi_user: "{{default_ipmi_user}}" + current_cvm_vlan_tag: "{{nodes.current_cvm_vlan_tag}}" + nos_package: "{{images.aos_packages[0]}}" + blocks: + - block_id: "{{nodes.block_id}}" + nodes: + - manual_mode: + cvm_ip: "{{nodes.node1.cvm_ip}}" + cvm_gb_ram: 50 + hypervisor_hostname: "{{nodes.node1.hypervisor_hostname}}" + ipmi_netmask: "{{nodes.node1.ipmi_netmask}}" + ipmi_gateway: "{{nodes.node1.ipmi_gateway}}" + ipmi_ip: "{{nodes.node1.ipmi_ip}}" + ipmi_password: "{{nodes.node1.ipmi_password}}" + hypervisor: "{{nodes.node1.hypervisor}}" + hypervisor_ip: "{{nodes.node1.hypervisor_ip}}" + node_position: "{{nodes.node1.node_position}}" + clusters: + - redundancy_factor: 1 + cluster_members: + - "{{nodes.node1.cvm_ip}}" + name: "test-cluster" + timezone: "Asia/Calcutta" + cvm_ntp_servers: + - "{{nodes.ntp_servers[0]}}" + - "{{nodes.ntp_servers[1]}}" + cvm_dns_servers: + - "{{nodes.dns_servers[0]}}" + - "{{nodes.dns_servers[1]}}" + hypervisor_ntp_servers: + - "{{nodes.ntp_servers[0]}}" + - "{{nodes.ntp_servers[1]}}" + register: result + no_log: true + ignore_errors: True + + - name: Creation Status + assert: + that: + - result.response is defined + - result.failed==false + - result.changed==true + - result.response.cluster_urls is defined + fail_msg: " Fail : unable to image nodes and create cluster" + success_msg: "Succes: cluster and node imaging done successfully" + +###################################################### \ No newline at end of file diff --git a/tests/integration/targets/ntnx_foundation_sanity/tasks/main.yml b/tests/integration/targets/ntnx_foundation_sanity/tasks/main.yml new file mode 100644 index 000000000..24fc4e925 --- /dev/null +++ b/tests/integration/targets/ntnx_foundation_sanity/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ foundation_host }}" + block: + - import_tasks: "image_nodes.yml" diff --git a/tests/integration/targets/ntnx_hosts_info/aliases b/tests/integration/targets/ntnx_hosts_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_hosts_info/aliases +++ b/tests/integration/targets/ntnx_hosts_info/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_image_placement_policies_info/aliases b/tests/integration/targets/ntnx_image_placement_policies_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_image_placement_policies_info/aliases +++ b/tests/integration/targets/ntnx_image_placement_policies_info/aliases 
@@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_image_placement_policy/aliases b/tests/integration/targets/ntnx_image_placement_policy/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_image_placement_policy/aliases +++ b/tests/integration/targets/ntnx_image_placement_policy/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_image_placement_policy/tasks/update.yml b/tests/integration/targets/ntnx_image_placement_policy/tasks/update.yml index cca46377f..4bf0a4488 100644 --- a/tests/integration/targets/ntnx_image_placement_policy/tasks/update.yml +++ b/tests/integration/targets/ntnx_image_placement_policy/tasks/update.yml @@ -57,7 +57,7 @@ - result.changed == false - "'Nothing to change' in result.msg" fail_msg: "Image placement policy got updated" - success_msg: "Image placement policy update skipped succesfully due to no changes in spec" + success_msg: "Image placement policy update skipped successfully due to no changes in spec" ##################################################################################################### diff --git a/tests/integration/targets/ntnx_images/aliases b/tests/integration/targets/ntnx_images/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_images/aliases +++ b/tests/integration/targets/ntnx_images/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_images/tasks/update.yml b/tests/integration/targets/ntnx_images/tasks/update.yml index 79484eaaa..0cd1a4812 100644 --- a/tests/integration/targets/ntnx_images/tasks/update.yml +++ b/tests/integration/targets/ntnx_images/tasks/update.yml @@ -60,7 +60,7 @@ - result.failed == false - "'Nothing to change' in result.msg" fail_msg: "Image got updated" - success_msg: "Image update skipped succesfully due to no changes in spec" + success_msg: "Image update skipped successfully due to no changes in spec" ######################################################################################## diff --git a/tests/integration/targets/ntnx_images_info/aliases b/tests/integration/targets/ntnx_images_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_images_info/aliases +++ b/tests/integration/targets/ntnx_images_info/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_karbon_clusters_and_info/aliases b/tests/integration/targets/ntnx_karbon_clusters_and_info/aliases index 87e7bdaae..e69de29bb 100644 --- a/tests/integration/targets/ntnx_karbon_clusters_and_info/aliases +++ b/tests/integration/targets/ntnx_karbon_clusters_and_info/aliases @@ -1 +0,0 @@ -disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/create_delete_and_info.yml b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml similarity index 57% rename from tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/create_delete_and_info.yml rename to tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml index f721fdd78..7902da501 100644 --- a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/create_delete_and_info.yml +++ b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/crud.yml @@ -145,7 +145,7 @@ - result.response.cni_config.pod_ipv4_cidr == "{{cni.pod_ipv4_cidr}}" - result.response.cni_config.service_ipv4_cidr == "{{cni.service_ipv4_cidr}}" fail_msg: " Fail: unable to create DEV cluster with Flannel network provider" - success_msg: " Pass: create DEV cluster 
with Flannel network provider succesfully " + success_msg: " Pass: create DEV cluster with Flannel network provider successfully " ############################# - name: delete dev cluster with Flannel network provider ntnx_karbon_clusters: @@ -162,7 +162,7 @@ - result.failed == false - result.response.status == "SUCCEEDED" fail_msg: " Fail: unable to delete dev cluster with Flannel network provider" - success_msg: " Pass: delete dev cluster with Flannel network provider finished succesfully" + success_msg: " Pass: delete dev cluster with Flannel network provider finished successfully" ############################# - name: create DEV cluster with Calico network provider ntnx_karbon_clusters: @@ -183,7 +183,7 @@ num_instances: 1 cpu: 4 memory_gb: 8 - disk_gb: 120 + disk_gb: 40 masters: num_instances: 1 cpu: 4 @@ -219,7 +219,7 @@ - karbon_cluster.response.cni_config.service_ipv4_cidr == "{{cni.service_ipv4_cidr}}" - karbon_cluster.response.cni_config.pod_ipv4_cidr == "{{cni.pod_ipv4_cidr}}" fail_msg: " Fail: unable to create DEV cluster with Calico network provider " - success_msg: " Pass: create DEV cluster with Calico network provider finished succesfully" + success_msg: " Pass: create DEV cluster with Calico network provider finished successfully" ############################# - name: test getting dev cluster using name @@ -273,6 +273,233 @@ fail_msg: " Fail: Unable to get particular Cluster and it's kube config " success_msg: " Pass: Cluster info obtained successfully with it's kube config " ############################# +- name: Generate random node_pool name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=9,upper = false)[0]}}" + +- set_fact: + suffix_name: "ansible" + +- set_fact: + node1_name: "{{random_name}}{{suffix_name}}1" + node2_name: "{{random_name}}{{suffix_name}}2" + node3_name: "{{random_name}}{{suffix_name}}3" + +- debug: + msg: Start testing ntnx_karbon_clusters_node_pools + +- name: Create node pool with subnet uuid + ntnx_karbon_clusters_node_pools: + node_subnet: + uuid: "{{network.dhcp.uuid}}" + node_pool_name: "{{node1_name}}" + cluster_name: "{{karbon_name}}" + pool_config: + num_instances: 2 + cpu: 4 + memory_gb: 8 # for etcd min 8 + disk_gb: 120 + register: result + ignore_errors: true + +- name: Creation Status + assert: + that: + - result.changed == true + - result.failed == false + - result.cluster_name == "{{karbon_name}}" + - result.response is defined + - result.node_pool_name is defined + - result.node_pool_name=="{{node1_name}}" + fail_msg: "Fail: Unable to Create node pool " + success_msg: "Passed: Create node pool finished successfully " +################################# +- name: try to update node pool config with same values + ntnx_karbon_clusters_node_pools: + node_pool_name: "{{node1_name}}" + cluster_name: "{{karbon_name}}" + pool_config: + num_instances: 2 + cpu: 4 + memory_gb: 8 # for etcd min 8 + disk_gb: 120 + register: result + ignore_errors: true + +- name: Creation Status + assert: + that: + - result.changed == false + - result.failed == false + - result.msg == "Nothing to change." 
+ fail_msg: "Fail: idempotecy check fail " + success_msg: "Passed: Returned as expected " +################################# +- name: try to update node pool config with wrong labels + ntnx_karbon_clusters_node_pools: + node_pool_name: "{{node1_name}}" + cluster_name: "{{karbon_name}}" + pool_config: + num_instances: 2 + cpu: 4 + memory_gb: 8 # for etcd min 8 + disk_gb: 120 + add_labels: + propert.-+]y5: "string" + propert5: "string" + property4: "string+-.3-@" + register: result + ignore_errors: true + +- name: Creation Status + assert: + that: + - result.changed == false + - result.failed == true + fail_msg: "Fail: node pool updated with wrong labels " + success_msg: "Passed: Returned as expected " +################################# +- name: update pool by increasing cpu,memory_gb,num_instances and try to add wrong labels + ntnx_karbon_clusters_node_pools: + wait: True + node_pool_name: "{{node1_name}}" + cluster_name: "{{karbon_name}}" + pool_config: + cpu: 6 + memory_gb: 10 # for etcd min 8 + disk_gb: 150 + num_instances: 4 + add_labels: + property1: "test-property1" + property2: "test-property2" + property3: "test-property3" + propert.-+]y5: "string" + register: result + ignore_errors: true + +- name: Creation Status + assert: + that: + - result.changed == true + - result.failed == false + - result.cluster_name == "{{karbon_name}}" + - result.response is defined + - result.node_pool_name is defined + - result.node_pool_name=="{{node1_name}}" + - result.skipped == true + fail_msg: "Fail: Unable to update pool by increasing cpu,memory_gb,num_instances and try to add wrong labels " + success_msg: "Passed: update pool by increasing cpu,memory_gb,num_instances and try to add wrong labels finished successfully " +# ################################# +- name: update pool by add labels + ntnx_karbon_clusters_node_pools: + wait: True + node_pool_name: "{{node1_name}}" + cluster_name: "{{karbon_name}}" + add_labels: + property1: "test-property1" + property2: "test-property2" + property3: "test-property3" + register: result + ignore_errors: true + +- name: Creation Status + assert: + that: + - result.changed == true + - result.failed == false + - result.cluster_name == "{{karbon_name}}" + - result.response is defined + - result.node_pool_name is defined + - result.node_pool_name=="{{node1_name}}" + fail_msg: "Fail: Unable to update pool by add labels " + success_msg: "Passed: update pool by add labels finished successfully " +################################# +- name: update pool by decreasing cpu,memory_gb,num_instances and add remove labels + ntnx_karbon_clusters_node_pools: + wait: True + node_pool_name: "{{node1_name}}" + cluster_name: "{{karbon_name}}" + pool_config: + cpu: 5 + memory_gb: 9 # for etcd min 8 + disk_gb: 140 + num_instances: 3 + remove_labels: + - property2 + - property3 + register: result + ignore_errors: true + +- name: Creation Status + assert: + that: + - result.changed == true + - result.failed == false + - result.cluster_name == "{{karbon_name}}" + - result.response is defined + - result.node_pool_name is defined + - result.node_pool_name=="{{node1_name}}" + fail_msg: "Fail: Unable to update pool by decreasing cpu,memory_gb,num_instances and add remove labels " + success_msg: "Passed: update pool by decreasing cpu,memory_gb,num_instances and add remove labels finished successfully " +################################ +- name: delete pool + ntnx_karbon_clusters_node_pools: + state: absent + node_pool_name: "{{node1_name}}" + cluster_name: "{{karbon_name}}" + register: 
result + ignore_errors: true + +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status == 'SUCCEEDED' + - result.failed == false + - result.changed == true + fail_msg: " Unable to delete node pool " + success_msg: " node pool has been deleted successfully " +################################# + +- name: Create node pool with subnet name with default values + ntnx_karbon_clusters_node_pools: + node_subnet: + name: "{{network.dhcp.name}}" + node_pool_name: "{{node2_name}}" + cluster_name: "{{karbon_name}}" + register: result + ignore_errors: true + +- name: Creation Status + assert: + that: + - result.changed == true + - result.failed == false + - result.cluster_name == "{{karbon_name}}" + - result.response is defined + - result.node_pool_name is defined + - result.node_pool_name=="{{node2_name}}" + fail_msg: "Fail: Unable to Create node pool with subnet name with default values " + success_msg: "Passed: Create node pool with subnet name with default values finished successfully " +################################# +- name: delete pool + ntnx_karbon_clusters_node_pools: + state: absent + node_pool_name: "{{node2_name}}" + cluster_name: "{{karbon_name}}" + register: result + ignore_errors: true + +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status == 'SUCCEEDED' + - result.failed == false + - result.changed == true + fail_msg: " Unable to delete node pool " + success_msg: " node pool has been deleted successfully " +################################# - name: delete dev cluster ntnx_karbon_clusters: state: absent @@ -288,5 +515,5 @@ - result.failed == false - result.response.status == "SUCCEEDED" fail_msg: " Fail: unable to delete dev cluster with Calico network provider" - success_msg: " Pass: delete dev cluster with Calico network provider finished succesfully" + success_msg: " Pass: delete dev cluster with Calico network provider finished successfully" ############################# \ No newline at end of file diff --git a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/main.yml b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/main.yml index a07bee2dc..f7d047c91 100644 --- a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/main.yml +++ b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/main.yml @@ -6,5 +6,5 @@ nutanix_password: "{{ password }}" validate_certs: "{{ validate_certs }}" block: - - import_tasks: "create_delete_and_info.yml" - - import_tasks: "negativ_scenarios.yml" + - import_tasks: "crud.yml" + - import_tasks: "negative_scenarios.yml" diff --git a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negativ_scenarios.yml b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml similarity index 80% rename from tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negativ_scenarios.yml rename to tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml index f3b2f1f8c..bd897a6dd 100644 --- a/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negativ_scenarios.yml +++ b/tests/integration/targets/ntnx_karbon_clusters_and_info/tasks/negative_scenarios.yml @@ -6,49 +6,6 @@ karbon_name: "test-module22" ############################# -- name: create cluster with disk_gb size less than minimum - ntnx_karbon_clusters: - cluster: - name: "{{cluster.name}}" - name: "{{karbon_name}}" - k8s_version: "{{k8s_version}}" - host_os: "{{host_os}}" - node_subnet: - uuid: 
"{{network.dhcp.uuid}}" - cni: - node_cidr_mask_size: "{{cni.node_cidr_mask_size}}" - service_ipv4_cidr: "{{cni.service_ipv4_cidr}}" - pod_ipv4_cidr: "{{cni.pod_ipv4_cidr}}" - network_provider: Calico - custom_node_configs: - etcd: - num_instances: 1 - cpu: 4 - memory_gb: 8 - disk_gb: 80 - storage_class: - nutanix_cluster_password: "{{nutanix_cluster_password}}" - nutanix_cluster_username: "{{nutanix_cluster_username}}" - default_storage_class: True - name: test-storage-class - reclaim_policy: Retain - storage_container: "{{storage_container.name}}" - file_system: xfs - flash_mode: true - register: result - ignore_errors: true - -- name: Creation Status - assert: - that: - - result.response == {} - - result.changed == false - - result.failed == true - - result.error == "disk_gb cannot be less then 120" - - result.msg == "Failed generating create cluster spec" - fail_msg: " Fail: cluster creaeted with wrong disk_gb size less than minimum" - success_msg: " Pass: Retunred as expected" -############################# - name: create cluster with cpu less than minimum ntnx_karbon_clusters: cluster: diff --git a/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml b/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml index bbb1c66be..292d7cf16 100644 --- a/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml +++ b/tests/integration/targets/ntnx_karbon_registries/tasks/create.yml @@ -47,7 +47,7 @@ - result.response.name == "{{registry_name}}" - result.response.uuid is defined fail_msg: "Fail: Unable to create registery" - success_msg: "Pass: create registry finished succesfully" + success_msg: "Pass: create registry finished successfully" ################################################################ - name: delete registry ntnx_karbon_registries: @@ -64,7 +64,7 @@ - result.changed == true - result.response.registry_name == "{{registry_name}}" fail_msg: "Fail: Unable to delete created registry" - success_msg: "Pass: delete registry finished succesfully" + success_msg: "Pass: delete registry finished successfully" ################################################################ @@ -85,7 +85,7 @@ - result.changed == true - result.response.uuid is defined fail_msg: "Fail: unable to create registry with username and password" - success_msg: "Pass: create registry with username and password finished succesfully" + success_msg: "Pass: create registry with username and password finished successfully" ############################################################### - name: delete registry ntnx_karbon_registries: @@ -102,4 +102,4 @@ - result.changed == true - "'{{registry_name}}' in result.response.registry_name" fail_msg: "Fail: unable to delete created registry with username and password" - success_msg: "Pass: delete registry finished succesfully" + success_msg: "Pass: delete registry finished successfully" diff --git a/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml b/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml index 121ce265f..5e26f9517 100644 --- a/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml +++ b/tests/integration/targets/ntnx_karbon_registries/tasks/negativ_scenarios.yml @@ -16,5 +16,5 @@ - result.response is defined - result.failed == true - result.changed == false - fail_msg: "Fail: create registery with wrong port number finished succesfully" + fail_msg: "Fail: create registery with wrong port number finished successfully" success_msg: "Pass: Returned as expected 
" \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_availability_databases/aliases b/tests/integration/targets/ntnx_ndb_availability_databases/aliases index 139597f9c..d10d66c76 100644 --- a/tests/integration/targets/ntnx_ndb_availability_databases/aliases +++ b/tests/integration/targets/ntnx_ndb_availability_databases/aliases @@ -1,2 +1,2 @@ - +disabled diff --git a/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml index b5d83345a..7d55b64f5 100644 --- a/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml +++ b/tests/integration/targets/ntnx_ndb_availability_databases/tasks/tests.yml @@ -419,12 +419,14 @@ register: result +# skip jekyll/Liquid syntax check +# {% raw %} - name: create properties map set_fact: properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.properties}}" no_log: true - +# {% endraw %} - name: Creation Status assert: @@ -548,13 +550,14 @@ register: result - +# skip jekyll/Liquid syntax check +# {% raw %} - name: create properties map set_fact: properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.properties}}" no_log: true - +# {% endraw %} - name: Creation Status assert: diff --git a/tests/integration/targets/ntnx_ndb_clusters/aliases b/tests/integration/targets/ntnx_ndb_clusters/aliases index e69de29bb..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_clusters/aliases +++ b/tests/integration/targets/ntnx_ndb_clusters/aliases @@ -0,0 +1 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_database_clones/aliases b/tests/integration/targets/ntnx_ndb_database_clones/aliases index e69de29bb..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_database_clones/aliases +++ b/tests/integration/targets/ntnx_ndb_database_clones/aliases @@ -0,0 +1 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml b/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml index 31ceb27bc..899853d9e 100644 --- a/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml +++ b/tests/integration/targets/ntnx_ndb_database_clones/tasks/clones.yml @@ -284,11 +284,14 @@ ansible-clones: ansible-test-db-clones register: result +# skip jekyll/Liquid syntax check +# {% raw %} - name: create properties map set_fact: - properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" + properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.properties}}" no_log: true +# {% endraw %} - name: Clone create status assert: @@ -448,7 +451,7 @@ - result.response.userPitrTimestamp == "2023-02-04 07:29:36" - result.response.timeZone == "UTC" fail_msg: "creation refresh db clone spec failed" - success_msg: "refresh db clone spec created succesfully" + success_msg: "refresh db clone spec created successfully" - name: refresh db clone diff --git a/tests/integration/targets/ntnx_ndb_databases_actions/aliases b/tests/integration/targets/ntnx_ndb_databases_actions/aliases index 139597f9c..2774d8d3a 100644 --- a/tests/integration/targets/ntnx_ndb_databases_actions/aliases +++ b/tests/integration/targets/ntnx_ndb_databases_actions/aliases @@ -1,2 +1,3 @@ +disabled \ No newline at end of file diff --git 
a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml index 31937aea3..c75d79436 100644 --- a/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml +++ b/tests/integration/targets/ntnx_ndb_databases_actions/tasks/all_actions.yml @@ -179,7 +179,7 @@ - result.response.timeMachineId == time_machine_uuid - result.response.lcmConfig.expiryDetails.expireInDays == 4 fail_msg: "Unable to create snapshot with expiry config" - success_msg: "Snapshot with expiry config created succesfully" + success_msg: "Snapshot with expiry config created successfully" @@ -257,7 +257,7 @@ - result.response.lcmConfig.expiryDetails.expireInDays == 6 fail_msg: "Unable to add expiry schedule and rename it" - success_msg: "Snapshot updated succesfully" + success_msg: "Snapshot updated successfully" - name: Idempotency check @@ -623,12 +623,15 @@ - test2 register: result +# {% raw %} + - name: create linked databases to its uuid map set_fact: linked_databases: "{{ linked_databases | default({}) | combine ({ item['name'] : item['id'] }) }}" loop: "{{result.response}}" no_log: true +# {% endraw %} - name: check linked database update status assert: @@ -648,12 +651,15 @@ database_uuid: "{{linked_databases.test1}}" register: result +# {% raw %} + - name: create linked database map set_fact: linked_databases: "{{ linked_databases | default({}) | combine ({ item['name'] : item['id'] }) }}" loop: "{{result.response}}" no_log: true +# {% endraw %} - name: check linked database update status assert: diff --git a/tests/integration/targets/ntnx_ndb_databases_sanity/aliases b/tests/integration/targets/ntnx_ndb_databases_sanity/aliases new file mode 100644 index 000000000..139597f9c --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_sanity/aliases @@ -0,0 +1,2 @@ + + diff --git a/tests/integration/targets/ntnx_ndb_databases_sanity/meta/main.yml b/tests/integration/targets/ntnx_ndb_databases_sanity/meta/main.yml new file mode 100644 index 000000000..ea2e9da19 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_sanity/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_ndb_env diff --git a/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/main.yml b/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/main.yml new file mode 100644 index 000000000..d09f77ab1 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- module_defaults: + group/nutanix.ncp.ntnx: + nutanix_host: "{{ ndb_ip }}" + nutanix_username: "{{ ndb_username }}" + nutanix_password: "{{ ndb_password }}" + validate_certs: false + + block: + - import_tasks: "tests.yml" diff --git a/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/tests.yml new file mode 100644 index 000000000..6e8240cf9 --- /dev/null +++ b/tests/integration/targets/ntnx_ndb_databases_sanity/tasks/tests.yml @@ -0,0 +1,578 @@ +--- +# Summary: +# This playbook will test basic database flows + + +- debug: + msg: "start ndb databases crud tests" + +- name: Generate random name + set_fact: + random_name: "{{query('community.general.random_string',numbers=false, special=false,length=12)}}" + +- set_fact: + db1_name: "{{random_name[0]}}" + db1_name_updated: "{{random_name[0]}}-updated" + vm1_name: "{{random_name[0]}}-vm" + +################################### Single instance postgres database tests 
############################# + + +- name: create spec for single instance postgres database on new db server vm + check_mode: yes + ntnx_ndb_databases: + wait: true + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_params_profile: + name: "{{db_params_profile.name}}" + + db_vm: + create_new_server: + name: "{{ vm1_name }}" + desc: vm for db server + password: "test_password" + cluster: + name: "{{cluster.cluster1.name}}" + software_profile: + name: "{{ software_profile.name }}" + network_profile: + name: "{{ network_profile.name }}" + compute_profile: + name: "{{ compute_profile.name }}" + pub_ssh_key: "test_key" + + postgres: + listener_port: "9999" + db_name: testAnsible + db_password: "test_password" + db_size: 200 + type: "single" + auto_tune_staging_drive: false + allocate_pg_hugepage: true + pre_create_script: "ls" + post_create_script: "ls -a" + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + auto_tune_staging_drive: False + tags: + ansible-databases: "single-instance-dbs" + + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + register: result + +- set_fact: + expected_action_arguments: [ + { + "name": "dbserver_description", + "value": "vm for db server" + }, + { + "name": "listener_port", + "value": "9999" + }, + { + "name": "auto_tune_staging_drive", + "value": false + }, + { + "name": "allocate_pg_hugepage", + "value": True + }, + { + "name": "cluster_database", + "value": false + }, + { + "name": "auth_method", + "value": "md5" + }, + { + "name": "db_password", + "value": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" + }, + { + "name": "pre_create_script", + "value": "ls" + }, + { + "name": "post_create_script", + "value": "ls -a" + }, + { + "name": "database_names", + "value": "testAnsible" + }, + { + "name": "database_size", + "value": "200" + } + ] + +- set_fact: + expected_time_machine_info: { + "autoTuneLogDrive": true, + "description": "TM-desc", + "name": "TM1", + "schedule": { + "continuousSchedule": { + "enabled": true, + "logBackupInterval": 30, + "snapshotsPerDay": 2 + }, + "monthlySchedule": { + "dayOfMonth": 4, + "enabled": true + }, + "quartelySchedule": { + "dayOfMonth": 4, + "enabled": true, + "startMonth": "JANUARY" + }, + "snapshotTimeOfDay": { + "hours": 11, + "minutes": 10, + "seconds": 2 + }, + "weeklySchedule": { + "dayOfWeek": "WEDNESDAY", + "enabled": true + } + }, + "slaId": "{{sla.uuid}}" + } + +- set_fact: + mainetance_tasks: { + "maintenanceWindowId": "{{maintenance.window_uuid}}", + "tasks": [ + { + "payload": { + "prePostCommand": { + "postCommand": "ls -a", + "preCommand": "ls" + } + }, + "taskType": "OS_PATCHING" + }, + { + "payload": { + "prePostCommand": { + "postCommand": "ls -F", + "preCommand": "ls -l" + } + }, + "taskType": "DB_PATCHING" + } + ] + } + +- name: Check mode status + assert: + that: + - result.response is defined + - result.changed == False + - result.response.name == db1_name + - result.response.databaseDescription == "ansible-created-db-desc" + - result.response.actionArguments == expected_action_arguments + - result.response.computeProfileId == "{{compute_profile.uuid}}" + - result.response.networkProfileId == "{{network_profile.uuid}}" + - result.response.dbParameterProfileId == 
"{{db_params_profile.uuid}}" + - result.response.softwareProfileId == "{{software_profile.uuid}}" + - result.response.autoTuneStagingDrive == False + - result.response.timeMachineInfo == expected_time_machine_info + - result.response.nodes | length == 1 + - result.response.nodeCount == 1 + - result.response.nodes[0].nxClusterId == "{{cluster.cluster1.uuid}}" + - result.response.maintenanceTasks == mainetance_tasks + - result.response.createDbserver == True + fail_msg: "Unable to create single instance postgres database provision spec" + success_msg: "single instance postgres database provision spec created successfully" + + + +- name: create single instance postgres database on new db server vm + ntnx_ndb_databases: + wait: true + name: "{{db1_name}}" + desc: "ansible-created-db-desc" + + db_params_profile: + name: "{{db_params_profile.name}}" + + db_vm: + create_new_server: + ip: "{{ vm_ip }}" + name: "{{ vm1_name }}" + desc: "vm for db server" + password: "{{ vm_password }}" + cluster: + name: "{{cluster.cluster1.name}}" + software_profile: + name: "{{ software_profile.name }}" + network_profile: + name: "{{ static_network_profile.name }}" + compute_profile: + name: "{{ compute_profile.name }}" + pub_ssh_key: "{{ public_ssh_key }}" + + postgres: + listener_port: "5432" + db_name: testAnsible + db_password: "{{ vm_password }}" + db_size: 200 + type: "single" + + time_machine: + name: TM1 + desc: TM-desc + sla: + name: "{{ sla.name }}" + schedule: + daily: "11:10:02" + weekly: WEDNESDAY + monthly: 4 + quaterly: JANUARY + log_catchup: 30 + snapshots_per_day: 2 + tags: + ansible-databases: "single-instance-dbs" + + automated_patching: + maintenance_window: + name: "{{ maintenance.window_name }}" + tasks: + - type: "OS_PATCHING" + pre_task_cmd: "ls" + post_task_cmd: "ls -a" + - type: "DB_PATCHING" + pre_task_cmd: "ls -l" + post_task_cmd: "ls -F" + register: result + +- set_fact: + db_uuid: "{{result.db_uuid}}" + +- name: create properties map + set_fact: + properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" + loop: "{{result.response.properties}}" + no_log: true + +- name: Creation Status + assert: + that: + - result.response is defined + - result.response.status == 'READY' + - result.db_uuid is defined + - result.changed == true + - result.response.name == db1_name + - result.response.description == "ansible-created-db-desc" + - result.response.type == "postgres_database" + - properties["vm_ip"] == vm_ip + - properties["listener_port"] == "5432" + - properties["db_parameter_profile_id"] == db_params_profile.uuid + - properties["auth"] == "md5" + - result.response.databaseNodes[0].status == "READY" + - result.response.tags | length == 1 + - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].value == "single-instance-dbs" + + fail_msg: "Unable to create single instance postgres database" + success_msg: "single instance postgres database created successfully" + +- set_fact: + db_server_uuid: "{{result.response.databaseNodes[0].dbserverId}}" + +- name: get vm details associated to the database instance created above and verify + ntnx_ndb_db_servers_info: + uuid: "{{db_server_uuid}}" + register: result + +- name: Verify DB server VM status + assert: + that: + - result.response is defined + - result.response.status == 'UP' + - result.changed == False + - result.response.name == vm1_name + - result.response.nxClusterId == cluster.cluster1.uuid + - result.response.description == "vm for db server" + fail_msg: "Unable to 
verify db server vm" + success_msg: "db server vm created by database instance creation verified successfully" + +################################### update tests ############################# + + +- name: update database with check mode + check_mode: yes + ntnx_ndb_databases: + wait: true + db_uuid: "{{db_uuid}}" + name: "{{db1_name_updated}}" + desc: "ansible-created-db-desc-updated" + + tags: + ansible-databases: "single-instance-dbs-updated" + register: result + +- name: check mode status + assert: + that: + - result.response is defined + - result.changed == False + - result.response.name == db1_name_updated + - result.response.description == "ansible-created-db-desc-updated" + + fail_msg: "Unable to create single instance postgres database update spec" + success_msg: "single instance postgres database update spec generated successfully" + +- name: update database + ntnx_ndb_databases: + wait: true + db_uuid: "{{db_uuid}}" + name: "{{db1_name_updated}}" + desc: "ansible-created-db-desc-updated" + + tags: + ansible-databases: "single-instance-dbs-updated" + register: result + +- name: update status + assert: + that: + - result.response is defined + - result.response.status == 'READY' + - result.db_uuid is defined + - result.changed == true + - result.response.name == db1_name_updated + - result.response.description == "ansible-created-db-desc-updated" + - result.response.tags | length == 1 + - result.response.tags[0].tagName == "{{tags.databases.name}}" + - result.response.tags[0].value == "single-instance-dbs-updated" + + + fail_msg: "Unable to update single instance postgres database" + success_msg: "single instance postgres database updated successfully" + + +- name: idempotency checks + ntnx_ndb_databases: + wait: true + db_uuid: "{{db_uuid}}" + name: "{{db1_name_updated}}" + desc: "ansible-created-db-desc-updated" + + tags: + ansible-databases: "single-instance-dbs-updated" + register: result + +- name: check idempotency status + assert: + that: + - result.changed == false + - result.failed == false + - "'Nothing to change' in result.msg" + fail_msg: "database got updated" + success_msg: "database update skipped successfully due to no changes in spec" + +################################### delete tests ############################# + +- name: create spec for delete db from vm + check_mode: yes + ntnx_ndb_databases: + state: "absent" + db_uuid: "{{db_uuid}}" + wait: true + delete_db_from_vm: true + register: result + +- name: verify delete check mode spec + assert: + that: + - result.changed == false + - result.failed == false + - result.response.delete == True + - result.response.remove == False + - result.response.deleteTimeMachine == False + fail_msg: "creation of spec for delete db from vm failed" + success_msg: "spec for delete db from vm created successfully" + + + +- name: create spec for soft remove + check_mode: yes + ntnx_ndb_databases: + state: "absent" + db_uuid: "{{db_uuid}}" + wait: true + soft_delete: true + delete_time_machine: true + register: result + +- name: verify soft remove check mode spec + assert: + that: + - result.changed == false + - result.failed == false + - result.response.delete == False + - result.response.remove == False + - result.response.softRemove == True + - result.response.deleteTimeMachine == True + fail_msg: "creation of spec for soft remove with time machine delete failed" + success_msg: "spec for soft remove with time machine delete created successfully" + + +#####################################INFO Module 
tests####################################################### + +- debug: + msg: Start testing ntnx_ndb_databases_info based on created database + +- name: List ndb databases + ntnx_ndb_databases_info: + register: databases + no_log: true + +- name: check listing status + assert: + that: + - databases.response is defined + - databases.failed == false + - databases.changed == false + - databases.response | length > 0 + fail_msg: "Unable to list all era databases" + success_msg: "era databases listed successfully" +################################################################ +- name: Get era databases using its name + ntnx_ndb_databases_info: + name: "{{databases.response[0].name}}" + register: result + no_log: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.id == "{{databases.response[0].id}}" + fail_msg: "Unable to Get era databases using its name" + success_msg: "Get era databases using its name finished successfully" +################################################################ +- name: Get era databases using its id + ntnx_ndb_databases_info: + uuid: "{{databases.response[0].id}}" + register: result + no_log: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{databases.response[0].name}}" + fail_msg: "Unable to Get era databases using its id" + success_msg: "Get era databases using its id finished successfully" + +################################################################ +- name: Get era databases using its id and detailed response + ntnx_ndb_databases_info: + filters: + detailed: True + uuid: "{{databases.response[0].id}}" + register: result + no_log: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.name == "{{databases.response[0].name}}" + - result.response.timeMachine is defined + + fail_msg: "Unable to Get era databases using its id" + success_msg: "Get era databases using its id finished successfully" + + +################################################################ + +- name: get era database with incorrect name + ntnx_ndb_databases_info: + name: "xxxxxxx" + register: result + ignore_errors: True + no_log: true + +- name: check listing status + assert: + that: + - result.error is defined + - result.failed == true + - result.changed == false + fail_msg: "module didn't error out correctly when incorrect name is given" + success_msg: "module errored out correctly when incorrect name is given" + +############################################################################################ + + +- name: unregister db along with delete time machine + ntnx_ndb_databases: + db_uuid: "{{db_uuid}}" + state: "absent" + wait: true + delete_time_machine: true + register: result + +- name: verify status of delete of database along with time machine delete + assert: + that: + - result.changed == True + - result.failed == false + - result.response.status == "5" + fail_msg: "database delete failed" + success_msg: "database deleted successfully" + + +- name: delete db server vm + ntnx_ndb_db_server_vms: + state: "absent" + wait: True + uuid: "{{db_server_uuid}}" + delete_from_cluster: True + delete_vgs: True + delete_vm_snapshots: True + register: result + +- name: check delete status + assert: + that: + - result.response is defined + - 
result.changed == True + - result.response.status == "5" + + fail_msg: "Unable to delete db server vm" + success_msg: "DB server VM deleted successfully" diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/aliases b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/aliases index 139597f9c..2774d8d3a 100644 --- a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/aliases +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/aliases @@ -1,2 +1,3 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml index 817de3937..65efd06c7 100644 --- a/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_1/tasks/tests.yml @@ -277,11 +277,14 @@ - set_fact: db_uuid: "{{result.db_uuid}}" +# skip jekyll/Liquid syntax check +# {% raw %} - name: create properties map set_fact: properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.properties}}" no_log: true +# {% endraw %} - name: Creation Status assert: @@ -398,7 +401,7 @@ - result.failed == false - "'Nothing to change' in result.msg" fail_msg: "database got updated" - success_msg: "database update skipped succesfully due to no changes in spec" + success_msg: "database update skipped successfully due to no changes in spec" ################################### delete tests ############################# @@ -660,11 +663,14 @@ register: result +# skip jekyll/Liquid syntax check +# {% raw %} - name: create properties map set_fact: properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.properties}}" no_log: true +# {% endraw %} - name: Creation Status assert: diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/aliases b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/aliases index 139597f9c..2774d8d3a 100644 --- a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/aliases +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/aliases @@ -1,2 +1,3 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml index a181413eb..59287e1f4 100644 --- a/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml +++ b/tests/integration/targets/ntnx_ndb_databases_single_instance_2/tasks/tests.yml @@ -92,12 +92,16 @@ - set_fact: db_uuid: "{{result.db_uuid}}" +# skip jekyll/Liquid syntax check +# {% raw %} - name: create properties map set_fact: properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.properties}}" no_log: true +# {% endraw %} + - name: Creation Status assert: that: @@ -326,13 +330,16 @@ register: result - +# skip jekyll/Liquid syntax check +# {% raw %} - name: create properties map set_fact: properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.properties}}" no_log: true +# {% endraw %} + - name: Creation Status assert: that: diff --git a/tests/integration/targets/ntnx_ndb_db_server_vms/aliases b/tests/integration/targets/ntnx_ndb_db_server_vms/aliases index 139597f9c..2774d8d3a 
100644 --- a/tests/integration/targets/ntnx_ndb_db_server_vms/aliases +++ b/tests/integration/targets/ntnx_ndb_db_server_vms/aliases @@ -1,2 +1,3 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml index 5b0cc18e7..60f07a1c4 100644 --- a/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_db_server_vms/tasks/crud.yml @@ -43,11 +43,14 @@ register: check_mode_result +# skip jekyll/Liquid syntax check +# {% raw %} - name: create action_arguments map set_fact: action_arguments: "{{ action_arguments | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{check_mode_result.response.actionArguments}}" no_log: true +# {% endraw %} - set_fact: mainetance_tasks: { @@ -257,12 +260,14 @@ post_task_cmd: "ls -F" register: result - +# skip jekyll/Liquid syntax check +# {% raw %} - name: create properties map set_fact: properties: "{{ properties | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.properties}}" no_log: true +# {% endraw %} - name: Creation Status assert: @@ -340,7 +345,7 @@ - result.failed == false - "'Nothing to change' in result.msg" fail_msg: "db server vm got updated" - success_msg: "db server vm update skipped succesfully due to no changes in state" + success_msg: "db server vm update skipped successfully due to no changes in state" - name: update db server vm name with check mode and check defaults @@ -821,12 +826,16 @@ - set_fact: action_arguments: {} +# skip jekyll/Liquid syntax check +# {% raw %} - name: create action_arguments map set_fact: action_arguments: "{{ action_arguments | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.actionArguments}}" no_log: true +# {% endraw %} + - set_fact: maintenance_tasks: { "maintenanceWindowId": "{{maintenance.window_uuid}}", @@ -895,11 +904,14 @@ register: result +# skip jekyll/Liquid syntax check +# {% raw %} - name: create properties map set_fact: properties1: "{{ properties1 | default({}) | combine ({ item['name'] : item['value'] }) }}" loop: "{{result.response.properties}}" no_log: true +# {% endraw %} - name: Creation Status assert: diff --git a/tests/integration/targets/ntnx_ndb_maintenance_windows/aliases b/tests/integration/targets/ntnx_ndb_maintenance_windows/aliases index 8b1378917..b29a52bdb 100644 --- a/tests/integration/targets/ntnx_ndb_maintenance_windows/aliases +++ b/tests/integration/targets/ntnx_ndb_maintenance_windows/aliases @@ -1 +1,2 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_profiles/aliases b/tests/integration/targets/ntnx_ndb_profiles/aliases index 139597f9c..8b1378917 100644 --- a/tests/integration/targets/ntnx_ndb_profiles/aliases +++ b/tests/integration/targets/ntnx_ndb_profiles/aliases @@ -1,2 +1 @@ - diff --git a/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml index e8ef5a45b..0f3521d25 100644 --- a/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/compute.yml @@ -68,7 +68,7 @@ - result.response.versions[0].propertiesMap.CPUS == "2" - result.response.versions[0].propertiesMap.MEMORY_SIZE == "8" - fail_msg: "Fail: create compute profile finished succesfully " + fail_msg: "Fail: create compute profile finished successfully " success_msg: "Pass: Unable to 
create compute profile" @@ -105,7 +105,7 @@ - result.response.version.propertiesMap.MEMORY_SIZE == "5" - result.response.version.published == true fail_msg: "Fail: unable to verify update of params in compute profile and publish profile" - success_msg: "Pass: verify update of params in compute profile and publish profile finished succesfully" + success_msg: "Pass: verify update of params in compute profile and publish profile finished successfully" ################################################################ - name: verify idempotency check in compute profile ntnx_ndb_profiles: @@ -123,7 +123,7 @@ - result.changed == false - result.profile_uuid is defined fail_msg: "Fail: unable to verify idempotency check in compute profile" - success_msg: "Pass: verify idempotency check in compute profile finished succesfully" + success_msg: "Pass: verify idempotency check in compute profile finished successfully" ################################################################ - name: verify unpublish flow in compute profile ntnx_ndb_profiles: @@ -145,7 +145,7 @@ - result.profile_uuid is defined - result.response.version.published == false fail_msg: "Fail: unable to verify unpublish flow in compute profile " - success_msg: "Pass: verify unpublish flow in compute profile finished succesfully" + success_msg: "Pass: verify unpublish flow in compute profile finished successfully" ################################################################ - name: Delete all created cmpute profiles ntnx_ndb_profiles: @@ -162,7 +162,7 @@ - result.changed == true - result.msg == "All items completed" fail_msg: "unable to delete all created compute profiles" - success_msg: "All compute profiles deleted succesfully" + success_msg: "All compute profiles deleted successfully" - set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml index 45c2622a6..ce59be447 100644 --- a/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/db_params.yml @@ -102,7 +102,7 @@ - result.response.versions[0].propertiesMap.wal_buffers == "{{wal_buffers}}" - result.response.versions[0].propertiesMap.wal_keep_segments == "{{wal_keep_segments}}" fail_msg: "Fail: Unable to create db params profile " - success_msg: "Pass: Creation of db params profile finished succesfully " + success_msg: "Pass: Creation of db params profile finished successfully " - set_fact: todelete: "{{ todelete + [ result.profile_uuid ] }}" @@ -139,7 +139,7 @@ - result.response.profile.versions[0].propertiesMap.max_locks_per_transaction == "3" - result.response.profile.versions[0].propertiesMap.effective_io_concurrency == "4" fail_msg: "Fail: unable to verify update of params in database_parameter profile and publish profile " - success_msg: "Pass: verify update of params in database_parameter profile and publish profile finished succesfully" + success_msg: "Pass: verify update of params in database_parameter profile and publish profile finished successfully" ################################################################ - name: verify unpublish flow in database_parameter profile ntnx_ndb_profiles: @@ -160,7 +160,7 @@ - result.profile_uuid is defined - result.response.profile.versions[0].published == false fail_msg: "Fail: verify unpublish flow in database_parameter profile " - success_msg: "Pass: verify unpublish flow in database_parameter profile finished succesfully " + success_msg: "Pass: verify 
unpublish flow in database_parameter profile finished successfully " ################################################################ - name: verify creation of db params profile with defaults ntnx_ndb_profiles: @@ -182,7 +182,7 @@ - result.response.type == "Database_Parameter" - result.response.versions is defined fail_msg: "Fail: Unable to verify creation of db params profile with defaults " - success_msg: "Pass: verify creatition of db params profile with defaults finished succesfully " + success_msg: "Pass: verify creation of db params profile with defaults finished successfully " - set_fact: todelete: "{{ todelete + [ result.profile_uuid ] }}" @@ -205,7 +205,7 @@ - result.response.profile.name == "{{profile3_name}}" - result.response.profile.description == "testdesc" fail_msg: "Fail: Unable to verify idempotency check " - success_msg: "Pass: verify idempotency check finished succesfully" + success_msg: "Pass: verify idempotency check finished successfully" ################################################################ - name: Delete all created Database_Parameter profiles ntnx_ndb_profiles: @@ -222,7 +222,7 @@ - result.changed == true - result.msg == "All items completed" fail_msg: "unable to delete all created Database_Parameter profiles" - success_msg: "All Database_Parameter profiles deleted succesfully" + success_msg: "All Database_Parameter profiles deleted successfully" - set_fact: todelete: [] \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml b/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml index 6926d39b1..e4089a412 100644 --- a/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml +++ b/tests/integration/targets/ntnx_ndb_profiles/tasks/network_profile.yml @@ -42,7 +42,7 @@ - result.response.versions[0].propertiesMap.VLAN_NAME == "{{network_profile.single.vlan_name}}" - result.response.versions[0].published == false fail_msg: "Fail: unable to verify create of single cluster network profile" - success_msg: "Pass: verify create of single cluster network profile finished succesfully " + success_msg: "Pass: verify create of single cluster network profile finished successfully " ################################################################ - name: update the profile for single cluster by name , desc , publish ntnx_ndb_profiles: @@ -68,7 +68,7 @@ - result.response.profile.versions[0].propertiesMap.VLAN_NAME == "{{network_profile.single.vlan_name}}" - result.response.profile.versions[0].published == true fail_msg: "Fail: unable to update the profile for single cluster by name , desc , publish " - success_msg: "Pass: update the profile for single cluster by name , desc , publish finished succesfully " + success_msg: "Pass: update the profile for single cluster by name , desc , publish finished successfully " - set_fact: todelete: "{{ todelete + [ result.profile_uuid ] }}" @@ -89,87 +89,87 @@ - result.changed == false - result.profile_uuid is defined fail_msg: "Fail: unable to verify idempotency check" - success_msg: "Pass: verify idempotency check finished succesfully " + success_msg: "Pass: verify idempotency check finished successfully " ################################################################ -- name: verify create of multiple cluster network profile - ntnx_ndb_profiles: - name: "{{profile3_name}}" - desc: "testdesc" - type: network - database_type: postgres - network: - topology: cluster - vlans: - - - cluster: - name: "{{network_profile.HA.cluster1.name}}" - 
vlan_name: "{{network_profile.HA.cluster1.vlan_name}}" - - - cluster: - name: "{{network_profile.HA.cluster2.name}}" - vlan_name: "{{network_profile.HA.cluster2.vlan_name}}" +# - name: verify create of multiple cluster network profile +# ntnx_ndb_profiles: +# name: "{{profile3_name}}" +# desc: "testdesc" +# type: network +# database_type: postgres +# network: +# topology: cluster +# vlans: +# - +# cluster: +# name: "{{network_profile.HA.cluster1.name}}" +# vlan_name: "{{network_profile.HA.cluster1.vlan_name}}" +# - +# cluster: +# name: "{{network_profile.HA.cluster2.name}}" +# vlan_name: "{{network_profile.HA.cluster2.vlan_name}}" - register: result - ignore_errors: true +# register: result +# ignore_errors: true -- name: check listing status - assert: - that: - - result.response is defined - - result.failed == false - - result.changed == true - - result.response.name == "{{profile3_name}}" - - result.response.description == "testdesc" - - result.response.type == "Network" - - result.response.topology == "cluster" - - result.response.versions[0].propertiesMap.ENABLE_IP_ADDRESS_SELECTION == "false" - - result.response.versions[0].propertiesMap.VLAN_NAME_0 == "{{network_profile.HA.cluster1.vlan_name}}" - - result.response.versions[0].propertiesMap.VLAN_NAME_1 == "{{network_profile.HA.cluster2.vlan_name}}" - - result.response.versions[0].propertiesMap.CLUSTER_NAME_0 == "{{network_profile.HA.cluster1.name}}" - - result.response.versions[0].propertiesMap.CLUSTER_NAME_1 == "{{network_profile.HA.cluster2.name}}" - fail_msg: "Fail: unable to verify create of multiple cluster network profile " - success_msg: "Pass: verify create of multiple cluster network profile finished sucessfully" +# - name: check listing status +# assert: +# that: +# - result.response is defined +# - result.failed == false +# - result.changed == true +# - result.response.name == "{{profile3_name}}" +# - result.response.description == "testdesc" +# - result.response.type == "Network" +# - result.response.topology == "cluster" +# - result.response.versions[0].propertiesMap.ENABLE_IP_ADDRESS_SELECTION == "false" +# - result.response.versions[0].propertiesMap.VLAN_NAME_0 == "{{network_profile.HA.cluster1.vlan_name}}" +# - result.response.versions[0].propertiesMap.VLAN_NAME_1 == "{{network_profile.HA.cluster2.vlan_name}}" +# - result.response.versions[0].propertiesMap.CLUSTER_NAME_0 == "{{network_profile.HA.cluster1.name}}" +# - result.response.versions[0].propertiesMap.CLUSTER_NAME_1 == "{{network_profile.HA.cluster2.name}}" +# fail_msg: "Fail: unable to verify create of multiple cluster network profile " +# success_msg: "Pass: verify create of multiple cluster network profile finished sucessfully" -- set_fact: - todelete: "{{ todelete + [ result.profile_uuid ] }}" +# - set_fact: +# todelete: "{{ todelete + [ result.profile_uuid ] }}" ################################################################ -- name: update the profile for multiple cluster by subnets, publish - ntnx_ndb_profiles: - type: network - profile_uuid: "{{result.profile_uuid}}" - network: - publish: true - topology: cluster - vlans: - - - cluster: - name: "{{network_profile.HA.cluster1.name}}" - vlan_name: "{{network_profile.HA.cluster1.vlan_name2}}" - - - cluster: - name: "{{network_profile.HA.cluster2.name}}" - vlan_name: "{{network_profile.HA.cluster2.vlan_name2}}" - register: result - ignore_errors: true +# - name: update the profile for multiple cluster by subnets, publish +# ntnx_ndb_profiles: +# type: network +# profile_uuid: "{{result.profile_uuid}}" +# 
network: +# publish: true +# topology: cluster +# vlans: +# - +# cluster: +# name: "{{network_profile.HA.cluster1.name}}" +# vlan_name: "{{network_profile.HA.cluster1.vlan_name2}}" +# - +# cluster: +# name: "{{network_profile.HA.cluster2.name}}" +# vlan_name: "{{network_profile.HA.cluster2.vlan_name2}}" +# register: result +# ignore_errors: true -- name: check listing status - assert: - that: - - result.response is defined - - result.failed == false - - result.changed == true - - result.response.profile.name == "{{profile3_name}}" - - result.response.profile.description == "testdesc" - - result.response.profile.type == "Network" - - result.response.profile.topology == "cluster" - - result.response.profile.versions[0].propertiesMap.ENABLE_IP_ADDRESS_SELECTION == "false" - - result.response.profile.versions[0].propertiesMap.VLAN_NAME_0 == "{{network_profile.HA.cluster1.vlan_name2}}" - - result.response.profile.versions[0].propertiesMap.VLAN_NAME_1 == "{{network_profile.HA.cluster2.vlan_name2}}" - - result.response.profile.versions[0].propertiesMap.CLUSTER_NAME_0 == "{{network_profile.HA.cluster1.name}}" - - result.response.profile.versions[0].propertiesMap.CLUSTER_NAME_1 == "{{network_profile.HA.cluster2.name}}" - - result.response.profile.versions[0].published == true - fail_msg: "Fail: unable to update the profile for multiple cluster by subnets, publish " - success_msg: "Pass: update the profile for multiple cluster by subnets, publish finished successfully" +# - name: check listing status +# assert: +# that: +# - result.response is defined +# - result.failed == false +# - result.changed == true +# - result.response.profile.name == "{{profile3_name}}" +# - result.response.profile.description == "testdesc" +# - result.response.profile.type == "Network" +# - result.response.profile.topology == "cluster" +# - result.response.profile.versions[0].propertiesMap.ENABLE_IP_ADDRESS_SELECTION == "false" +# - result.response.profile.versions[0].propertiesMap.VLAN_NAME_0 == "{{network_profile.HA.cluster1.vlan_name2}}" +# - result.response.profile.versions[0].propertiesMap.VLAN_NAME_1 == "{{network_profile.HA.cluster2.vlan_name2}}" +# - result.response.profile.versions[0].propertiesMap.CLUSTER_NAME_0 == "{{network_profile.HA.cluster1.name}}" +# - result.response.profile.versions[0].propertiesMap.CLUSTER_NAME_1 == "{{network_profile.HA.cluster2.name}}" +# - result.response.profile.versions[0].published == true +# fail_msg: "Fail: unable to update the profile for multiple cluster by subnets, publish " +# success_msg: "Pass: update the profile for multiple cluster by subnets, publish finished successfully" ################################################################ - name: Delete all created network profiles ntnx_ndb_profiles: @@ -186,7 +186,7 @@ - result.changed == true - result.msg == "All items completed" fail_msg: "unable to delete all created network profiles" - success_msg: "All network profiles deleted succesfully" + success_msg: "All network profiles deleted successfully" - set_fact: todelete: [] \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_profiles_info/aliases b/tests/integration/targets/ntnx_ndb_profiles_info/aliases index 139597f9c..8b1378917 100644 --- a/tests/integration/targets/ntnx_ndb_profiles_info/aliases +++ b/tests/integration/targets/ntnx_ndb_profiles_info/aliases @@ -1,2 +1 @@ - diff --git a/tests/integration/targets/ntnx_ndb_profiles_info/tasks/info.yml b/tests/integration/targets/ntnx_ndb_profiles_info/tasks/info.yml index 9691e884d..1cdd23174 
100644 --- a/tests/integration/targets/ntnx_ndb_profiles_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_ndb_profiles_info/tasks/info.yml @@ -68,6 +68,44 @@ - result.response[0].type == "Network" fail_msg: "Unable to list all Network NDB profile" success_msg: "Network NDB profiles listed successfully" + +################################################################ +- name: get network profile with available IPs + ntnx_ndb_profiles_info: + name: "{{static_network_profile.name}}" + include_available_ips: true + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.available_ips | length > 0 + - result.response.id == "{{static_network_profile.uuid}}" + + fail_msg: "Unable to list network profile with available IPs" + success_msg: "Network NDB profiles along with available IPs obtained successfully" + +- name: get network profile with available IPs + ntnx_ndb_profiles_info: + uuid: "{{static_network_profile.uuid}}" + include_available_ips: true + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == false + - result.response.available_ips | length > 0 + - result.response.id == "{{static_network_profile.uuid}}" + fail_msg: "Unable to list network profile with available IPs" + success_msg: "Network NDB profiles along with available IPs obtained successfully" ################################################################ - name: List Compute profiles ntnx_ndb_profiles_info: diff --git a/tests/integration/targets/ntnx_ndb_slas/aliases b/tests/integration/targets/ntnx_ndb_slas/aliases index 139597f9c..2774d8d3a 100644 --- a/tests/integration/targets/ntnx_ndb_slas/aliases +++ b/tests/integration/targets/ntnx_ndb_slas/aliases @@ -1,2 +1,3 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_slas/tasks/CRUD.yml b/tests/integration/targets/ntnx_ndb_slas/tasks/CRUD.yml index 217ce7c5e..9783b72a2 100644 --- a/tests/integration/targets/ntnx_ndb_slas/tasks/CRUD.yml +++ b/tests/integration/targets/ntnx_ndb_slas/tasks/CRUD.yml @@ -51,7 +51,7 @@ - result.response.weeklyRetention == {{frequency.snapshots_retention.weekly}} fail_msg: "Fail: Verify creation of slas with check mode failed " - success_msg: "Pass: Verify creation of slas with check mode finished succesfully " + success_msg: "Pass: Verify creation of slas with check mode finished successfully " ################################################################ - name: Verify creation of slas ntnx_ndb_slas: @@ -125,7 +125,7 @@ - result.response.weeklyRetention == {{frequency.snapshots_retention.weekly}} - result.sla_uuid is defined fail_msg: "Fail: Unable to update sla " - success_msg: "Pass: verify slas update flow finished succesfully" + success_msg: "Pass: verify slas update flow finished successfully" ################################################################ update flow ######################################### - name: verify slas update flow with check mode ntnx_ndb_slas: @@ -158,7 +158,7 @@ - result.response.weeklyRetention == {{frequency.snapshots_retention.weekly}} - result.sla_uuid is defined fail_msg: "Fail: verify slas update flow with check mode " - success_msg: "Pass: verify slas update flow with check mode finished succesfully" + success_msg: "Pass: verify slas update flow with check mode finished successfully" 
################################################################ - name: verify idempotency ntnx_ndb_slas: @@ -267,7 +267,7 @@ - result.changed == true - result.msg == "All items completed" fail_msg: "unable to delete all created slas" - success_msg: "All slas deleted succesfully" + success_msg: "All slas deleted successfully" - set_fact: todelete: [] diff --git a/tests/integration/targets/ntnx_ndb_snapshots_info/aliases b/tests/integration/targets/ntnx_ndb_snapshots_info/aliases index 139597f9c..2774d8d3a 100644 --- a/tests/integration/targets/ntnx_ndb_snapshots_info/aliases +++ b/tests/integration/targets/ntnx_ndb_snapshots_info/aliases @@ -1,2 +1,3 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_software_profiles/aliases b/tests/integration/targets/ntnx_ndb_software_profiles/aliases index e69de29bb..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_software_profiles/aliases +++ b/tests/integration/targets/ntnx_ndb_software_profiles/aliases @@ -0,0 +1 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml index 14979797f..c978d10de 100644 --- a/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_software_profiles/tasks/crud.yml @@ -522,11 +522,15 @@ - set_fact: clusters: {} +# skip jekyll/Liquid syntax check +# {% raw %} - name: create clusters status map set_fact: clusters: "{{ clusters | default({}) | combine ({ item['nxClusterId'] : item['status'] }) }}" loop: "{{result.response.profile.clusterAvailability}}" no_log: True +# {% endraw %} + - name: check status of replication assert: that: diff --git a/tests/integration/targets/ntnx_ndb_tags/tasks/crud.yml b/tests/integration/targets/ntnx_ndb_tags/tasks/crud.yml index de2320e20..82f41e5c9 100644 --- a/tests/integration/targets/ntnx_ndb_tags/tasks/crud.yml +++ b/tests/integration/targets/ntnx_ndb_tags/tasks/crud.yml @@ -11,7 +11,7 @@ tag_name: "{{random_name[0]}}" tag_name_updated: "{{random_name[0]}}-updated" -############################# Create & Delete tests ######################## +############################# Create tests ######################## - name: check mode for creation check_mode: yes @@ -38,7 +38,7 @@ - result.response == expected_response fail_msg: "Unable to create spec for tag" - success_msg: "Spec generated succefully for tag creation" + success_msg: "Spec generated successfully for tag creation" - name: create tags for clone @@ -61,25 +61,60 @@ - result.response.description == "tag-created-by-ansible" - result.response.required == true fail_msg: "Tag for clone create failed" - success_msg: "Tag for clone created succefully" + success_msg: "Tag for clone created successfully" +- set_fact: + clone_tag_uuid: "{{result.uuid}}" +- name: create tags for database server + ntnx_ndb_tags: + name: "{{tag_name}}-database-server" + desc: tag-created-by-ansible + tag_value_required: True + entity_type: DATABASE_SERVER + register: result -- name: delete the tag +- name: Check create status + assert: + that: + - result.response is defined + - result.changed == true + - result.uuid is defined + - result.response.name == "{{tag_name}}-database-server" + - result.response.entityType == "DATABASE_SERVER" + - result.response.status == "ENABLED" + - result.response.description == "tag-created-by-ansible" + - result.response.required == true + fail_msg: "Tag for database server create failed" + 
success_msg: "Tag for clone created successfully" + +- set_fact: + db_server_tag_uuid: "{{result.uuid}}" + +- name: create tags for time machine ntnx_ndb_tags: - state: "absent" - uuid: "{{result.uuid}}" + name: "{{tag_name}}-time-machine" + desc: tag-created-by-ansible + tag_value_required: True + entity_type: TIME_MACHINE register: result -- name: Check delete status +- name: Check create status assert: that: - result.response is defined - result.changed == true - - result.failed == false - fail_msg: "Unable to delete tag" - success_msg: "tag deleted succefully" + - result.uuid is defined + - result.response.name == "{{tag_name}}-time-machine" + - result.response.entityType == "TIME_MACHINE" + - result.response.status == "ENABLED" + - result.response.description == "tag-created-by-ansible" + - result.response.required == true + fail_msg: "Tag for time machine create failed" + success_msg: "Tag for time machine created successfully" +- set_fact: + time_machine_tag_uuid: "{{result.uuid}}" - name: create tags for databases ntnx_ndb_tags: @@ -89,6 +124,8 @@ entity_type: DATABASE register: result +- set_fact: + database_tag_uuid: "{{result.uuid}}" - name: check create status assert: @@ -101,14 +138,158 @@ - result.response.entityType == "DATABASE" - result.response.status == "ENABLED" fail_msg: "Tag create for databases failed" - success_msg: "Tag created succefully" + success_msg: "Tag created successfully" +- name: create another tag for databases + ntnx_ndb_tags: + name: "{{tag_name}}-database2" + desc: tag-created-by-ansible + tag_value_required: False + entity_type: DATABASE + register: result - set_fact: - tag_uuid: "{{result.uuid}}" + database_tag2_uuid: "{{result.uuid}}" + +- name: check create status + assert: + that: + - result.response is defined + - result.changed == true + - result.uuid is defined + - result.response.name == "{{tag_name}}-database2" + - result.response.required == false + - result.response.entityType == "DATABASE" + - result.response.status == "ENABLED" + fail_msg: "Tag create for databases failed" + success_msg: "Tag created successfully" + + +################################# Info Module tests ########################## + +- name: get all tags + ntnx_ndb_tags_info: + register: result + +- name: Check info status + assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length > 1 + fail_msg: "Fetching all tags failed" + success_msg: "All tags obtained successfully" + +- name: get tag based on uuid + ntnx_ndb_tags_info: + uuid: "{{database_tag_uuid}}" + register: result + +- name: Check info status + assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.entityType == "DATABASE" + - result.response.id == "{{database_tag_uuid}}" + fail_msg: "get tag based on uuid failed" + success_msg: "tag based on uuid obtained successfully" + +- name: get all tags based on DATABASE entity type + ntnx_ndb_tags_info: + filters: + entity_type: "DATABASE" + register: result + +- name: Check info status + assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length > 1 + - result.response[0].entityType == "DATABASE" + fail_msg: "get all tags based on DATABASE entity type failed" + success_msg: "all tags based on DATABASE entity type obtained successfully" + +- name: get all tags based on CLONE entity type + ntnx_ndb_tags_info: + filters: + entity_type: "CLONE" + register: result + 
+- name: Check info status + assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length > 0 + - result.response[0].entityType == "CLONE" + fail_msg: "get all tags based on CLONE entity type failed" + success_msg: "all tags based on CLONE entity type obtained successfully" + +- name: get all tags based on TIME_MACHINE entity type + ntnx_ndb_tags_info: + filters: + entity_type: "TIME_MACHINE" + register: result +- name: Check info status + assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length > 0 + - result.response[0].entityType == "TIME_MACHINE" + fail_msg: "get all tags based on TIME_MACHINE entity type failed" + success_msg: "all tags based on TIME_MACHINE entity type obtained successfully" + + +- name: get all tags based on DATABASE_SERVER entity type + ntnx_ndb_tags_info: + filters: + entity_type: "DATABASE_SERVER" + register: result + +- name: Check info status + assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response | length > 0 + - result.response[0].entityType == "DATABASE_SERVER" + fail_msg: "get all tags based on DATABASE_SERVER entity type failed" + success_msg: "all tags based on DATABASE_SERVER entity type obtained successfully" + + +- name: get tag based on DATABASE entity type and name + ntnx_ndb_tags_info: + filters: + entity_type: "DATABASE" + name: "{{tag_name}}-database2" + register: result + + +- name: Check info status + assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.entityType == "DATABASE" + - result.response.name == "{{tag_name}}-database2" + fail_msg: "get tag based on DATABASE entity type and name failed" + success_msg: "tags based on DATABASE entity type and name obtained successfully" ################################## Update tests ######################## +- set_fact: + tag_uuid: "{{database_tag_uuid}}" + - name: update tag ntnx_ndb_tags: uuid: "{{tag_uuid}}" @@ -129,7 +310,7 @@ - result.response.entityType == "DATABASE" - result.response.status == "DEPRECATED" fail_msg: "tag update failed" - success_msg: "tag updated succefully" + success_msg: "tag updated successfully" @@ -171,15 +352,80 @@ - result.uuid == "{{tag_uuid}}" - result.response.status == "ENABLED" fail_msg: "Enabling tag failed" - success_msg: "Tag enabled succefully" + success_msg: "Tag enabled successfully" + +############################################################### delete tests ######################################## -- name: delete the tag +- name: delete the database based tag ntnx_ndb_tags: state: "absent" - uuid: "{{tag_uuid}}" + uuid: "{{database_tag_uuid}}" + register: result + + +- name: Check delete status + assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + fail_msg: "Unable to delete tag" + success_msg: "tag deleted successfully" + +- name: delete the database based tag + ntnx_ndb_tags: + state: "absent" + uuid: "{{database_tag2_uuid}}" + register: result + +- name: Check delete status + assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + fail_msg: "Unable to delete tag" + success_msg: "tag deleted successfully" + + +- name: delete the clone tag + ntnx_ndb_tags: + state: "absent" + uuid: "{{clone_tag_uuid}}" + register: result + +- name: Check delete status + assert: + that: + - 
result.response is defined + - result.changed == true + - result.failed == false + fail_msg: "Unable to delete tag" + success_msg: "tag deleted successfully" + + +- name: delete the time machine based tag + ntnx_ndb_tags: + state: "absent" + uuid: "{{time_machine_tag_uuid}}" register: result +- name: Check delete status + assert: + that: + - result.response is defined + - result.changed == true + - result.failed == false + fail_msg: "Unable to delete tag" + success_msg: "tag deleted successfully" + + +- name: delete the database server based tag + ntnx_ndb_tags: + state: "absent" + uuid: "{{db_server_tag_uuid}}" + register: result - name: Check delete status assert: @@ -188,4 +434,4 @@ - result.changed == true - result.failed == false fail_msg: "Unable to delete tag" - success_msg: "tag deleted succefully" + success_msg: "tag deleted successfully" \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_time_machines_info/aliases b/tests/integration/targets/ntnx_ndb_time_machines_info/aliases index e69de29bb..87e7bdaae 100644 --- a/tests/integration/targets/ntnx_ndb_time_machines_info/aliases +++ b/tests/integration/targets/ntnx_ndb_time_machines_info/aliases @@ -0,0 +1 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/aliases b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/aliases index 139597f9c..2774d8d3a 100644 --- a/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/aliases +++ b/tests/integration/targets/ntnx_ndb_time_machines_with_multi_clusters/aliases @@ -1,2 +1,3 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_vlans/aliases b/tests/integration/targets/ntnx_ndb_vlans/aliases index 139597f9c..2774d8d3a 100644 --- a/tests/integration/targets/ntnx_ndb_vlans/aliases +++ b/tests/integration/targets/ntnx_ndb_vlans/aliases @@ -1,2 +1,3 @@ +disabled \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml b/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml index 2525a8224..79089f56b 100644 --- a/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml +++ b/tests/integration/targets/ntnx_ndb_vlans/tasks/create_vlans.yml @@ -70,7 +70,7 @@ - result.response.ipPools[1].endIP == "{{ndb_vlan.ip_pools.1.end_ip}}" - result.response.ipPools[1].startIP == "{{ndb_vlan.ip_pools.1.start_ip}}" fail_msg: "fail: unable to update ndb vlan type for static" - success_msg: "pass: update ndb vlan type for static finished succesfully" + success_msg: "pass: update ndb vlan type for static finished successfully" - set_fact: todelete: [] @@ -206,7 +206,7 @@ - result.response.ipPools[1].endIP == "{{ndb_vlan.ip_pools.1.end_ip}}" - result.response.ipPools[1].startIP == "{{ndb_vlan.ip_pools.1.start_ip}}" fail_msg: "fail: unable to create static ndb vlan" - success_msg: "pass: create static ndb vlan finished succesfully" + success_msg: "pass: create static ndb vlan finished successfully" - set_fact: todelete: "{{ todelete + [ result.vlan_uuid ] }}" @@ -239,7 +239,7 @@ - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ndb_vlan.subnet_mask}}" - result.response.ipPools == [] fail_msg: "fail: unable to update ndb vlan by removing ip pool" - success_msg: "pass: update ndb vlan by removing ip pool finished succesfully" + success_msg: "pass: update ndb vlan by removing ip pool finished successfully" ################################################################ @@ -273,7 +273,7 
@@ - result.response.ipPools[0].endIP == "{{ndb_vlan.ip_pools.0.end_ip}}" - result.response.ipPools[0].startIP == "{{ndb_vlan.ip_pools.0.start_ip}}" fail_msg: "fail: unable to update ndb vlan by adding a pool " - success_msg: "pass: update ndb vlan by adding a pool finished succesfully" + success_msg: "pass: update ndb vlan by adding a pool finished successfully" ################################################################ @@ -303,7 +303,7 @@ - result.response.propertiesMap.VLAN_SECONDARY_DNS == "{{ndb_vlan.updated_secondary_dns}}" - result.response.propertiesMap.VLAN_SUBNET_MASK == "{{ndb_vlan.updated_subnet_mask}}" fail_msg: "fail: unable to update ndb vLAN Configuration" - success_msg: "pass: update ndb vLAN Configuration finished succesfully" + success_msg: "pass: update ndb vLAN Configuration finished successfully" ################################################################ @@ -327,7 +327,7 @@ - result.vlan_uuid is defined - result.response.properties == [] fail_msg: "fail: unable to update ndb vlan type " - success_msg: "pass: update ndb vlan type finished succesfully" + success_msg: "pass: update ndb vlan type finished successfully" ################################################################ @@ -429,6 +429,6 @@ - result.changed == true - result.msg == "All items completed" fail_msg: "unable to delete all created vlan's" - success_msg: "All vlan'sdeleted succesfully" + success_msg: "All vlans deleted successfully" - set_fact: todelete: [] \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml b/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml index 811233ada..aa716309c 100644 --- a/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml +++ b/tests/integration/targets/ntnx_ndb_vlans/tasks/negativ_scenarios.yml @@ -25,7 +25,7 @@ - result.changed == false - result.failed == true - result.msg == "Failed generating create vlan spec" - fail_msg: "fail: create Dhcp ndb vlan with static Configuration finished succesfully" + fail_msg: "fail: create Dhcp ndb vlan with static Configuration finished successfully" success_msg: "pass: Returnerd error as expected" # ############################### - name: create static ndb vlan with missing Configuration @@ -43,7 +43,7 @@ - result.changed == false - result.failed == true - result.msg == "Failed generating create vlan spec" - fail_msg: "fail: create static ndb vlan with missing Configuration finished succesfully" + fail_msg: "fail: create static ndb vlan with missing Configuration finished successfully" success_msg: "pass: Returnerd error as expected" ########### @@ -99,7 +99,7 @@ - result.changed == false - result.failed == true - result.msg == "Failed generating update vlan spec" - fail_msg: "fail: update dhcp ndb vlan with static Configuration finished succesfully" + fail_msg: "fail: update dhcp ndb vlan with static Configuration finished successfully" success_msg: "pass: Returnerd error as expected" ################################## @@ -120,7 +120,7 @@ - result.changed == true - result.msg == "All items completed" fail_msg: "unable to delete all created vlan's" - success_msg: "All vlan'sdeleted succesfully" + success_msg: "All vlans deleted successfully" - set_fact: todelete: [] \ No newline at end of file diff --git a/tests/integration/targets/ntnx_ova/aliases b/tests/integration/targets/ntnx_ova/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_ova/aliases +++ b/tests/integration/targets/ntnx_ova/aliases @@ -1 +1
@@ -disabled + diff --git a/tests/integration/targets/ntnx_permissions_info/aliases b/tests/integration/targets/ntnx_permissions_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_permissions_info/aliases +++ b/tests/integration/targets/ntnx_permissions_info/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_projects/aliases b/tests/integration/targets/ntnx_projects/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_projects/aliases +++ b/tests/integration/targets/ntnx_projects/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_projects/tasks/main.yml b/tests/integration/targets/ntnx_projects/tasks/main.yml index 742cb49ba..eddf0a46d 100644 --- a/tests/integration/targets/ntnx_projects/tasks/main.yml +++ b/tests/integration/targets/ntnx_projects/tasks/main.yml @@ -9,4 +9,4 @@ - import_tasks: "create_project.yml" - import_tasks: "delete_project.yml" - import_tasks: "update_project.yml" - - import_tasks: "advanced_projects.yml" + - import_tasks: "projects_with_role_mappings.yml" diff --git a/tests/integration/targets/ntnx_projects/tasks/advanced_projects.yml b/tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml similarity index 94% rename from tests/integration/targets/ntnx_projects/tasks/advanced_projects.yml rename to tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml index 6e715647e..757c8edea 100644 --- a/tests/integration/targets/ntnx_projects/tasks/advanced_projects.yml +++ b/tests/integration/targets/ntnx_projects/tasks/projects_with_role_mappings.yml @@ -14,6 +14,9 @@ project2_name: "{{random_name}}{{suffix_name}}2" project3_name: "{{random_name}}{{suffix_name}}3" +- set_fact: + ignore_errors: false + - name: Create Project with min spec ntnx_projects: name: "{{project1_name}}" @@ -25,7 +28,7 @@ user: uuid: "{{users[0]}}" register: result - ignore_errors: false + ignore_errors: "{{ignore_errors}}" - set_fact: response_acp: "{{result.response.status.access_control_policy_list_status[0].access_control_policy_status.resources}}" @@ -90,8 +93,7 @@ role: name: "{{roles[3]}}" register: result - ignore_errors: true - + ignore_errors: "{{ignore_errors}}" - set_fact: expected_subnets: ["{{ network.dhcp.uuid }}", "{{ static.uuid }}", "{{ overlay.uuid }}"] @@ -126,6 +128,8 @@ - set_fact: todelete: "{{ todelete + [ result.project_uuid ] }}" +- set_fact: + user_group_to_delete: "{{result.response.status.project_status.resources.external_user_group_reference_list[0].uuid}}" - name: Update Project role mappings and subnets and quotas @@ -311,4 +315,21 @@ ignore_errors: True - set_fact: - todelete: [] \ No newline at end of file + todelete: [] + +- name: delete user group + ntnx_user_groups: + state: absent + user_group_uuid: "{{user_group_to_delete}}" + register: result + ignore_errors: true + +- name: check listing status + assert: + that: + - result.response is defined + - result.failed == false + - result.changed == true + - result.response.status == "SUCCEEDED" or result.response.status.state == "DELETE_PENDING" + fail_msg: "Unable to delete user group " + success_msg: "user group deleted successfully" diff --git a/tests/integration/targets/ntnx_projects_info/aliases b/tests/integration/targets/ntnx_projects_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_projects_info/aliases +++ b/tests/integration/targets/ntnx_projects_info/aliases @@ -1 +1 @@ -disabled + diff --git
a/tests/integration/targets/ntnx_protection_rules/tasks/protection_rules.yml b/tests/integration/targets/ntnx_protection_rules/tasks/protection_rules.yml index c21554073..0571559ca 100644 --- a/tests/integration/targets/ntnx_protection_rules/tasks/protection_rules.yml +++ b/tests/integration/targets/ntnx_protection_rules/tasks/protection_rules.yml @@ -337,7 +337,7 @@ - temp_result.changed == false - temp_result.response.spec.name == "test-ansible-updated-check-mode" fail_msg: "Unable to generate update spec using check mode" - success_msg: "Protection policy update spec generated succesfully" + success_msg: "Protection policy update spec generated successfully" ############################################################## DELETE Protection Policy Tests ################################################################## diff --git a/tests/integration/targets/ntnx_roles/aliases b/tests/integration/targets/ntnx_roles/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_roles/aliases +++ b/tests/integration/targets/ntnx_roles/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_roles_info/aliases b/tests/integration/targets/ntnx_roles_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_roles_info/aliases +++ b/tests/integration/targets/ntnx_roles_info/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_security_rules/aliases b/tests/integration/targets/ntnx_security_rules/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_security_rules/aliases +++ b/tests/integration/targets/ntnx_security_rules/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_security_rules/tasks/app_rule.yml b/tests/integration/targets/ntnx_security_rules/tasks/app_rule.yml index 07191d468..c7f082897 100644 --- a/tests/integration/targets/ntnx_security_rules/tasks/app_rule.yml +++ b/tests/integration/targets/ntnx_security_rules/tasks/app_rule.yml @@ -8,7 +8,10 @@ apptype_filter_by_category: AppFamily: - Backup - apptier: Default + apptiers: + - "{{categories.apptiers[0]}}" + - "{{categories.apptiers[1]}}" + default_internal_policy: DENY_ALL inbounds: @@ -72,13 +75,15 @@ register: result ignore_errors: true + - name: Creation Status assert: that: - result.response is defined - result.failed == false - result.response.status.state == 'COMPLETE' - - result.response.spec.name=="test_app_rule" + - result.response.status.name=="test_app_rule" + - result.response.status.resources.app_rule.target_group.filter.params.AppTier | length == 2 fail_msg: ' fail: unable to create app security rule with inbound and outbound list' success_msg: 'pass: create app security rule with inbound and outbound list successfully' @@ -140,7 +145,9 @@ apptype_filter_by_category: AppFamily: - Backup - apptier: Default + apptiers: + - "{{categories.apptiers[0]}}" + - "{{categories.apptiers[1]}}" default_internal_policy: DENY_ALL allow_all_outbounds: true allow_all_inbounds: true @@ -157,6 +164,8 @@ - result.failed == false - result.response.status.state == 'COMPLETE' - result.response.spec.name=="test_app_rule" + - result.response.status.resources.app_rule.target_group.filter.params.AppTier | length == 2 + fail_msg: ' fail: unable to create app security rule with allow all inbound and outbound list' success_msg: 'pass: create app security rule with allow all inbound and outbound list successfully' - name: delete app security rule diff --git 
a/tests/integration/targets/ntnx_security_rules/tasks/quarantine_rule.yml b/tests/integration/targets/ntnx_security_rules/tasks/quarantine_rule.yml index badaadfae..b4eb996af 100644 --- a/tests/integration/targets/ntnx_security_rules/tasks/quarantine_rule.yml +++ b/tests/integration/targets/ntnx_security_rules/tasks/quarantine_rule.yml @@ -32,4 +32,4 @@ fail_msg: ' fail: unable to update quarantine_rule by adding inbound and outbound list ' success_msg: >- pass: update quarantine_rule by adding inbound and outbound list - succesfully \ No newline at end of file + successfully \ No newline at end of file diff --git a/tests/integration/targets/ntnx_security_rules_info/aliases b/tests/integration/targets/ntnx_security_rules_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_security_rules_info/aliases +++ b/tests/integration/targets/ntnx_security_rules_info/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml b/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml index 83770606f..6d8004009 100644 --- a/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml +++ b/tests/integration/targets/ntnx_security_rules_info/tasks/get_security_rules.yml @@ -91,5 +91,5 @@ - result.failed == false - result.response.status == 'SUCCEEDED' fail_msg: ' fail: unable to delete secutiry rule ' - success_msg: 'pass: security rule deleted succesfully ' + success_msg: 'pass: security rule deleted successfully ' ################################### \ No newline at end of file diff --git a/tests/integration/targets/ntnx_service_groups/aliases b/tests/integration/targets/ntnx_service_groups/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_service_groups/aliases +++ b/tests/integration/targets/ntnx_service_groups/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_service_groups/tasks/create.yml b/tests/integration/targets/ntnx_service_groups/tasks/create.yml index 334f2ba1e..f5e592a3e 100644 --- a/tests/integration/targets/ntnx_service_groups/tasks/create.yml +++ b/tests/integration/targets/ntnx_service_groups/tasks/create.yml @@ -174,7 +174,7 @@ - result.changed == true - result.msg == "All items completed" fail_msg: "unable to delete all created service groups" - success_msg: "All service groups deleted succesfully" + success_msg: "All service groups deleted successfully" - set_fact: todelete: [] \ No newline at end of file diff --git a/tests/integration/targets/ntnx_service_groups/tasks/update.yml b/tests/integration/targets/ntnx_service_groups/tasks/update.yml index 8378c0f60..2544acac8 100644 --- a/tests/integration/targets/ntnx_service_groups/tasks/update.yml +++ b/tests/integration/targets/ntnx_service_groups/tasks/update.yml @@ -103,4 +103,4 @@ - result.changed == true - result.msg == "All items completed" fail_msg: "unable to delete all created service groups" - success_msg: "All service groups deleted succesfully" \ No newline at end of file + success_msg: "All service groups deleted successfully" \ No newline at end of file diff --git a/tests/integration/targets/ntnx_service_groups_info/aliases b/tests/integration/targets/ntnx_service_groups_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_service_groups_info/aliases +++ b/tests/integration/targets/ntnx_service_groups_info/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_static_routes/aliases 
b/tests/integration/targets/ntnx_static_routes/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_static_routes/aliases +++ b/tests/integration/targets/ntnx_static_routes/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_static_routes/tasks/create.yml b/tests/integration/targets/ntnx_static_routes/tasks/create.yml index 304ebce6f..e35fc4679 100644 --- a/tests/integration/targets/ntnx_static_routes/tasks/create.yml +++ b/tests/integration/targets/ntnx_static_routes/tasks/create.yml @@ -40,7 +40,7 @@ - result.response.status.resources.default_route["nexthop"]["external_subnet_reference"]["name"] == "{{ external_nat_subnet.name }}" fail_msg: 'Fail: Unable to update static routes of vpc' - success_msg: 'Succes: static routes updated succesfully' + success_msg: 'Success: static routes updated successfully' ########################################################################################################### @@ -69,7 +69,7 @@ - result.failed == false - "'Nothing to update' in result.msg" fail_msg: "Static routes" - success_msg: "Static routes update skipped succesfully due to no changes in spec" + success_msg: "Static routes update skipped successfully due to no changes in spec" ########################################################################################################### diff --git a/tests/integration/targets/ntnx_static_routes_info/aliases b/tests/integration/targets/ntnx_static_routes_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_static_routes_info/aliases +++ b/tests/integration/targets/ntnx_static_routes_info/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_static_routes_info/tasks/info.yml b/tests/integration/targets/ntnx_static_routes_info/tasks/info.yml index 1bcd6736c..70ee1bfe6 100644 --- a/tests/integration/targets/ntnx_static_routes_info/tasks/info.yml +++ b/tests/integration/targets/ntnx_static_routes_info/tasks/info.yml @@ -26,7 +26,7 @@ - result.response.status.state == 'COMPLETE' - result.changed == true fail_msg: 'Fail: Unable to update static routes of vpc' - success_msg: 'Succes: static routes updated succesfully' + success_msg: 'Success: static routes updated successfully' ########################################################################################################### diff --git a/tests/integration/targets/ntnx_user_groups/aliases b/tests/integration/targets/ntnx_user_groups/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_user_groups/aliases +++ b/tests/integration/targets/ntnx_user_groups/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_user_groups/tasks/create.yml b/tests/integration/targets/ntnx_user_groups/tasks/create.yml index 549107388..d512ad077 100644 --- a/tests/integration/targets/ntnx_user_groups/tasks/create.yml +++ b/tests/integration/targets/ntnx_user_groups/tasks/create.yml @@ -1,6 +1,6 @@ --- - debug: - msg: start ntnx_user_groups create tests + msg: start ntnx_user_groups and info tests - name: create user group ntnx_user_groups: @@ -26,48 +26,112 @@ fail_msg: "Unable to create user group " success_msg: "user group created successfully" -- name: delete user group - ntnx_user_groups: - state: absent - user_group_uuid: "{{result.user_group_uuid}}" +- set_fact: + user_group_to_delete: "{{result.user_group_uuid}}" + +#############################INFO Tests##################### + +- name: List all user groups + ntnx_user_groups_info: + register: user_groups + 
ignore_errors: True + +- name: Listing Status + assert: + that: + - user_groups.response is defined + - user_groups.response.metadata.total_matches > 0 + fail_msg: "Unable to list all user groups" + success_msg: "User groups info obtained successfully" + +- set_fact: + test_user_group_name: "{{user_groups.response.entities.0.status.resources.display_name}}" +- set_fact: + test_user_group_uuid: "{{user_groups.response.entities.0.metadata.uuid}}" + +################################################## + +- name: List user_groups using user_group uuid criteria + ntnx_user_groups_info: + usergroup_uuid: "{{ test_user_group_uuid }}" register: result - ignore_errors: true + ignore_errors: True -- name: check listing status +- name: Listing Status assert: that: - result.response is defined + - result.changed == false - result.failed == false - - result.changed == true - - result.response.status == "SUCCEEDED" or result.response.status.state == "DELETE_PENDING" - fail_msg: "Unable to delete user group " - success_msg: "user group deletd successfully" + - result.response.status.resources.display_name == "{{ test_user_group_name }}" + - result.response.metadata.kind == "user_group" + fail_msg: "Unable to list user group using uuid" + success_msg: "user group info obtained successfully" +################################################## -- name: create user group with idp - ntnx_user_groups: - idp: - idp_uuid: "{{identity_provider_uuid}}" - group_name: test_group_987 +- name: List user_groups using filter criteria + ntnx_user_groups_info: + filter: + name: "{{ test_user_group_name }}" register: result - ignore_errors: true + ignore_errors: True -- name: check listing status +- name: Listing Status assert: that: - result.response is defined + - result.changed == false - result.failed == false - - result.changed == true - - result.response.status.state == "COMPLETE" - - result.user_group_uuid is defined - - result.response.status.resources.display_name == "test_group_987" - fail_msg: "Unable to create user group with idp " - success_msg: "user group with idp created successfully" + - result.response.entities[0].status.resources.display_name == "{{ test_user_group_name }}" + - result.response.metadata.kind == "user_group" + - result.response.metadata.total_matches == 1 + fail_msg: "Unable to list user groups using filter" + success_msg: "user group info obtained successfully" + +################################################## + +- name: List user groups using length and offset + ntnx_user_groups_info: + length: 2 + offset: 1 + register: result + ignore_errors: True + +- name: Listing Status + assert: + that: + - result.response is defined + - result.changed == false + - result.failed == false + - result.response.metadata.total_matches > 0 + - result.response.metadata.length == 2 + + + fail_msg: "Unable to list user groups using length and offset" + success_msg: "user groups listed successfully using length and offset" +################################################## +# - name: List user groups using ascending name sorting +# ntnx_user_groups_info: +# sort_order: "ASCENDING" +# sort_attribute: "group_name" +# kind: user_group +# register: result +# ignore_errors: True + +# - name: Listing Status +# assert: +# that: +# - result.response is defined +# - result.changed == false +# - result.failed == false +# fail_msg: "Unable to list user groups using ascending name sorting" +# success_msg: "user groups listed successfully using ascending name sorting" - name: delete user group ntnx_user_groups: state: 
absent - user_group_uuid: "{{result.user_group_uuid}}" + user_group_uuid: "{{user_group_to_delete}}" register: result ignore_errors: true @@ -77,6 +141,44 @@ - result.response is defined - result.failed == false - result.changed == true - - result.response.status == "SUCCEEDED" - fail_msg: "Unable to delete user group with idp " - success_msg: "user group with idp deleted successfully" \ No newline at end of file + - result.response.status == "SUCCEEDED" or result.response.status.state == "DELETE_PENDING" + fail_msg: "Unable to delete user group " + success_msg: "user group deleted successfully" + + +# - name: create user group with idp # ntnx_user_groups: # idp: # idp_uuid: "{{identity_provider_uuid}}" # group_name: test_group_987 # register: result # ignore_errors: true + +# - name: check listing status # assert: # that: # - result.response is defined # - result.failed == false # - result.changed == true # - result.response.status.state == "COMPLETE" # - result.user_group_uuid is defined # - result.response.status.resources.display_name == "test_group_987" # fail_msg: "Unable to create user group with idp " # success_msg: "user group with idp created successfully" + +# - name: delete user group # ntnx_user_groups: # state: absent # user_group_uuid: "{{result.user_group_uuid}}" # register: result # ignore_errors: true + +# - name: check listing status # assert: # that: # - result.response is defined # - result.failed == false # - result.changed == true # - result.response.status == "SUCCEEDED" # fail_msg: "Unable to delete user group with idp " # success_msg: "user group with idp deleted successfully" \ No newline at end of file diff --git a/tests/integration/targets/ntnx_user_groups_info/aliases b/tests/integration/targets/ntnx_user_groups_info/aliases deleted file mode 100644 index 7a68b11da..000000000 --- a/tests/integration/targets/ntnx_user_groups_info/aliases +++ /dev/null @@ -1 +0,0 @@ -disabled diff --git a/tests/integration/targets/ntnx_user_groups_info/meta/main.yml b/tests/integration/targets/ntnx_user_groups_info/meta/main.yml deleted file mode 100644 index e4f447d3a..000000000 --- a/tests/integration/targets/ntnx_user_groups_info/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - prepare_env diff --git a/tests/integration/targets/ntnx_user_groups_info/tasks/main.yml b/tests/integration/targets/ntnx_user_groups_info/tasks/main.yml deleted file mode 100644 index e2a93aa22..000000000 --- a/tests/integration/targets/ntnx_user_groups_info/tasks/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- module_defaults: - group/nutanix.ncp.ntnx: - nutanix_host: "{{ ip }}" - nutanix_username: "{{ username }}" - nutanix_password: "{{ password }}" - validate_certs: "{{ validate_certs }}" - block: - - import_tasks: "user_groups_info.yml" diff --git a/tests/integration/targets/ntnx_user_groups_info/tasks/user_groups_info.yml b/tests/integration/targets/ntnx_user_groups_info/tasks/user_groups_info.yml deleted file mode 100644 index a85155f9a..000000000 --- a/tests/integration/targets/ntnx_user_groups_info/tasks/user_groups_info.yml +++ /dev/null @@ -1,100 +0,0 @@ -- debug: - msg: start testing ntnx_user_groups_info -################################################## - -- name: List all user groups - ntnx_user_groups_info: - register: user_groups - ignore_errors: True - -- name: Listing Status - assert: - that: - - user_groups.response is defined - - user_groups.response.metadata.total_matches > 0 - fail_msg: "Unable to list all user groups" - success_msg: 
"User groups info obtained successfully" - -- set_fact: - test_user_group_name: "{{user_groups.response.entities.1.status.resources.display_name}}" -- set_fact: - test_user_group_uuid: "{{user_groups.response.entities.1.metadata.uuid}}" - -################################################## - -- name: List user_groups using user_group uuid criteria - ntnx_user_groups_info: - usergroup_uuid: "{{ test_user_group_uuid }}" - register: result - ignore_errors: True - -- name: Listing Status - assert: - that: - - result.response is defined - - result.changed == false - - result.failed == false - - result.response.status.resources.display_name == "{{ test_user_group_name }}" - - result.response.metadata.kind == "user_group" - fail_msg: "Unable to list user group using uuid" - success_msg: "user group info obtained successfully" - -################################################## - -- name: List user_groups using filter criteria - ntnx_user_groups_info: - filter: - name: "{{ test_user_group_name }}" - register: result - ignore_errors: True - -- name: Listing Status - assert: - that: - - result.response is defined - - result.changed == false - - result.failed == false - - result.response.entities[0].status.resources.display_name == "{{ test_user_group_name }}" - - result.response.metadata.kind == "user_group" - - result.response.metadata.total_matches == 1 - fail_msg: "Unable to list user groups using filter" - success_msg: "user group info obtained successfully" - -################################################## - -- name: List user groups using length and offset - ntnx_user_groups_info: - length: 2 - offset: 1 - register: result - ignore_errors: True - -- name: Listing Status - assert: - that: - - result.response is defined - - result.changed == false - - result.failed == false - - result.response.metadata.total_matches > 0 - - result.response.metadata.length == 2 - - - fail_msg: "Unable to list user groups using length and offset" - success_msg: "user groups listed successfully using length and offset" -################################################## -# - name: List user groups using ascending name sorting -# ntnx_user_groups_info: -# sort_order: "ASCENDING" -# sort_attribute: "group_name" -# kind: user_group -# register: result -# ignore_errors: True - -# - name: Listing Status -# assert: -# that: -# - result.response is defined -# - result.changed == false -# - result.failed == false -# fail_msg: "Unable to list user groups using ascending name sorting" -# success_msg: "user groups listed successfully using ascending name sorting" \ No newline at end of file diff --git a/tests/integration/targets/ntnx_users/aliases b/tests/integration/targets/ntnx_users/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_users/aliases +++ b/tests/integration/targets/ntnx_users/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_users/tasks/create.yml b/tests/integration/targets/ntnx_users/tasks/create.yml index 60d92b342..843a46535 100644 --- a/tests/integration/targets/ntnx_users/tasks/create.yml +++ b/tests/integration/targets/ntnx_users/tasks/create.yml @@ -61,7 +61,7 @@ - result.changed == true - result.msg == "All items completed" fail_msg: "Fail: unable to delete all users" - success_msg: "Pass: all users deleted succesfully" + success_msg: "Pass: all users deleted successfully" - set_fact: todelete: [] ################################################# @@ -118,27 +118,27 @@ fail_msg: "Fail: create wrong user" success_msg: "Pass: Returned as 
expected" ################################################# -- name: create idp user - ntnx_users: - identity_provider_uuid: "{{identity_provider_uuid}}" - username: testing_user - register: result - ignore_errors: true +# - name: create idp user +# ntnx_users: +# identity_provider_uuid: "{{identity_provider_uuid}}" +# username: testing_user +# register: result +# ignore_errors: true -- name: check listing status - assert: - that: - - result.response is defined - - result.changed == true - - result.failed == false - - result.user_uuid is defined - - result.response.status.state == "COMPLETE" - - result.response.status.name == "testing_user" - fail_msg: "Fail: unable to create idp user" - success_msg: "Pass: idp user created succesfully" +# - name: check listing status +# assert: +# that: +# - result.response is defined +# - result.changed == true +# - result.failed == false +# - result.user_uuid is defined +# - result.response.status.state == "COMPLETE" +# - result.response.status.name == "testing_user" +# fail_msg: "Fail: unable to create idp user" +# success_msg: "Pass: idp user created successfully" -- set_fact: - todelete: "{{ todelete + [ result.user_uuid ] }}" +# - set_fact: +# todelete: "{{ todelete + [ result.user_uuid ] }}" ################################################# - name: Delete created users ntnx_users: @@ -153,7 +153,7 @@ - result.changed == true - result.msg == "All items completed" fail_msg: "Fail: unable to delete all users" - success_msg: "Pass: all users deleted succesfully" + success_msg: "Pass: all users deleted successfully" - set_fact: todelete: [] @@ -182,4 +182,4 @@ - result.response is defined - result.response.status == 'SUCCEEDED' fail_msg: "Fail: unable to delete users" - success_msg: "Pass: users deleted succesfully" \ No newline at end of file + success_msg: "Pass: users deleted successfully" \ No newline at end of file diff --git a/tests/integration/targets/ntnx_users_info/aliases b/tests/integration/targets/ntnx_users_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_users_info/aliases +++ b/tests/integration/targets/ntnx_users_info/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/ntnx_vms_clone/aliases b/tests/integration/targets/ntnx_vms_clone/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/ntnx_vms_clone/aliases +++ b/tests/integration/targets/ntnx_vms_clone/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/nutanix_floating_ips/aliases b/tests/integration/targets/nutanix_floating_ips/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/nutanix_floating_ips/aliases +++ b/tests/integration/targets/nutanix_floating_ips/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/nutanix_floating_ips_info/aliases b/tests/integration/targets/nutanix_floating_ips_info/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/nutanix_floating_ips_info/aliases +++ b/tests/integration/targets/nutanix_floating_ips_info/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/nutanix_pbrs/aliases b/tests/integration/targets/nutanix_pbrs/aliases index 7a68b11da..8b1378917 100644 --- a/tests/integration/targets/nutanix_pbrs/aliases +++ b/tests/integration/targets/nutanix_pbrs/aliases @@ -1 +1 @@ -disabled + diff --git a/tests/integration/targets/nutanix_pbrs_info/aliases b/tests/integration/targets/nutanix_pbrs_info/aliases index 7a68b11da..8b1378917 100644 --- 
a/tests/integration/targets/nutanix_pbrs_info/aliases
+++ b/tests/integration/targets/nutanix_pbrs_info/aliases
@@ -1 +1 @@
-disabled
+
diff --git a/tests/integration/targets/nutanix_subnets/aliases b/tests/integration/targets/nutanix_subnets/aliases
index 7a68b11da..8b1378917 100644
--- a/tests/integration/targets/nutanix_subnets/aliases
+++ b/tests/integration/targets/nutanix_subnets/aliases
@@ -1 +1 @@
-disabled
+
diff --git a/tests/integration/targets/nutanix_subnets_info/aliases b/tests/integration/targets/nutanix_subnets_info/aliases
index 7a68b11da..8b1378917 100644
--- a/tests/integration/targets/nutanix_subnets_info/aliases
+++ b/tests/integration/targets/nutanix_subnets_info/aliases
@@ -1 +1 @@
-disabled
+
diff --git a/tests/integration/targets/nutanix_vms/aliases b/tests/integration/targets/nutanix_vms/aliases
index 7a68b11da..8b1378917 100644
--- a/tests/integration/targets/nutanix_vms/aliases
+++ b/tests/integration/targets/nutanix_vms/aliases
@@ -1 +1 @@
-disabled
+
diff --git a/tests/integration/targets/nutanix_vms_info/aliases b/tests/integration/targets/nutanix_vms_info/aliases
index 7a68b11da..8b1378917 100644
--- a/tests/integration/targets/nutanix_vms_info/aliases
+++ b/tests/integration/targets/nutanix_vms_info/aliases
@@ -1 +1 @@
-disabled
+
diff --git a/tests/integration/targets/nutanix_vpcs/aliases b/tests/integration/targets/nutanix_vpcs/aliases
index 7a68b11da..8b1378917 100644
--- a/tests/integration/targets/nutanix_vpcs/aliases
+++ b/tests/integration/targets/nutanix_vpcs/aliases
@@ -1 +1 @@
-disabled
+
diff --git a/tests/integration/targets/nutanix_vpcs_info/aliases b/tests/integration/targets/nutanix_vpcs_info/aliases
index 7a68b11da..8b1378917 100644
--- a/tests/integration/targets/nutanix_vpcs_info/aliases
+++ b/tests/integration/targets/nutanix_vpcs_info/aliases
@@ -1 +1 @@
-disabled
+
diff --git a/tests/integration/targets/prepare_env/tasks/cleanup.yml b/tests/integration/targets/prepare_env/tasks/cleanup.yml
index 94cb70cf4..9a2ceb7ac 100644
--- a/tests/integration/targets/prepare_env/tasks/cleanup.yml
+++ b/tests/integration/targets/prepare_env/tasks/cleanup.yml
@@ -5,16 +5,18 @@
   collections:
     - nutanix.ncp
   tasks:
+    - name: include var file
       include_vars: ../vars/main.yml
-# - name: Delete VM
-#   ntnx_vms:
-#     vm_uuid: '{{vm.uuid }}'
-#     state: absent
-#     nutanix_host: "{{ ip }}"
-#     nutanix_username: "{{ username }}"
-#     nutanix_password: "{{ password }}"
-#     validate_certs: False
+    - name: Delete VM
+      ignore_errors: true
+      ntnx_vms:
+        vm_uuid: '{{vm.uuid }}'
+        state: absent
+        nutanix_host: "{{ ip }}"
+        nutanix_username: "{{ username }}"
+        nutanix_password: "{{ password }}"
+        validate_certs: False
 
 # # - name: Delete DR VM
 # #   ntnx_vms:
 # #     vm_uuid: '{{dr_vm.uuid }}'
@@ -23,37 +25,41 @@
 # #     nutanix_username: "{{ username }}"
 # #     nutanix_password: "{{ password }}"
 # #     validate_certs: False
-# - name: Delete overlay
-#   ntnx_subnets:
-#     state: absent
-#     nutanix_host: "{{ ip }}"
-#     nutanix_username: "{{ username }}"
-#     nutanix_password: "{{ password }}"
-#     validate_certs: false
-#     subnet_uuid: "{{item }}"
-#   loop:
-#     - "{{overlay.uuid}}"
-# - name: Delete vpc
-#   ntnx_vpcs:
-#     state: absent
-#     nutanix_host: "{{ ip }}"
-#     nutanix_username: "{{ username }}"
-#     nutanix_password: "{{ password }}"
-#     validate_certs: False
-#     vpc_uuid: "{{ vpc.uuid }}"
-# - name: Delete subnets
-#   ntnx_subnets:
-#     state: absent
-#     nutanix_host: "{{ ip }}"
-#     nutanix_username: "{{ username }}"
-#     nutanix_password: "{{ password }}"
-#     validate_certs: false
-#     subnet_uuid: "{{item }}"
-#   loop:
-#     - "{{external_nat_subnet.uuid}}"
-#     - "{{static.uuid}}"
+    - name: Delete overlay
+      ignore_errors: true
+      ntnx_subnets:
+        state: absent
+        nutanix_host: "{{ ip }}"
+        nutanix_username: "{{ username }}"
+        nutanix_password: "{{ password }}"
+        validate_certs: false
+        subnet_uuid: "{{item }}"
+      loop:
+        - "{{overlay.uuid}}"
+    - name: Delete vpc
+      ignore_errors: true
+      ntnx_vpcs:
+        state: absent
+        nutanix_host: "{{ ip }}"
+        nutanix_username: "{{ username }}"
+        nutanix_password: "{{ password }}"
+        validate_certs: False
+        vpc_uuid: "{{ vpc.uuid }}"
+    - name: Delete subnets
+      ignore_errors: true
+      ntnx_subnets:
+        state: absent
+        nutanix_host: "{{ ip }}"
+        nutanix_username: "{{ username }}"
+        nutanix_password: "{{ password }}"
+        validate_certs: false
+        subnet_uuid: "{{item }}"
+      loop:
+        - "{{external_nat_subnet.uuid}}"
+        - "{{static.uuid}}"
 
-# - name: Delete downloaded disk file
-#   file:
-#     path: "{{ disk_image.dest }}"
-#     state: absent
\ No newline at end of file
+    - name: Delete downloaded disk file
+      ignore_errors: true
+      file:
+        path: "{{ disk_image.dest }}"
+        state: absent
\ No newline at end of file
diff --git a/tests/integration/targets/prepare_env/tasks/prepare_env.yml b/tests/integration/targets/prepare_env/tasks/prepare_env.yml
index 3c6c59d97..55f1e4fca 100644
--- a/tests/integration/targets/prepare_env/tasks/prepare_env.yml
+++ b/tests/integration/targets/prepare_env/tasks/prepare_env.yml
@@ -8,161 +8,163 @@
   tasks:
     - name: include var file
      include_vars: ../vars/main.yml
-# - set_fact:
-#     ip: "{{lookup('env', 'NUTANIX_HOST') }}"
-#     username: "{{lookup('env', 'NUTANIX_USERNAME') }}"
-#     password: "{{lookup('env', 'NUTANIX_PASSWORD') }}"
-#     recovery_site_ip: "{{lookup('env', 'NUTANIX_DR_SITE')}}"
-# - name: Insert credentials block to vars
-#   blockinfile:
-#     path: ../vars/main.yml
-#     marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 0"
-#     block: |
-#       ip: "{{lookup('env', 'NUTANIX_HOST') }}"
-#       username: "{{lookup('env', 'NUTANIX_USERNAME') }}"
-#       password: "{{lookup('env', 'NUTANIX_PASSWORD') }}"
-#       recovery_site_ip: "{{lookup('env', 'NUTANIX_DR_SITE') }}"
-# - name: create external subnet with NAT
-#   ntnx_subnets:
-#     state: present
-#     nutanix_host: "{{ ip }}"
-#     validate_certs: false
-#     nutanix_username: "{{ username }}"
-#     nutanix_password: "{{ password }}"
-#     name: "{{external_nat_subnets.name}}"
-#     external_subnet:
-#       vlan_id: "{{external_nat_subnets.vlan_id}}"
-#       enable_nat: True
-#       cluster:
-#         name: "{{ cluster.name }}"
-#       ipam:
-#         network_ip: "{{ external_nat_subnets.network_ip }}"
-#         network_prefix: "{{ external_nat_subnets.network_prefix }}"
-#         gateway_ip: "{{ external_nat_subnets.gateway_ip_address }}"
-#         ip_pools:
-#           - start_ip: "{{ external_nat_subnets.dhcp.start_address }}"
-#             end_ip: "{{ external_nat_subnets.dhcp.end_address }}"
-#   register: result
-# - name: Insert external subnet configuration block to vars
-#   blockinfile:
-#     path: ../vars/main.yml
-#     marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 1"
-#     block: |
-#       external_nat_subnet:
-#         name: "{{external_nat_subnets.name}}"
-#         uuid: "{{result.subnet_uuid}}"
+    - set_fact:
+        ip: "{{lookup('env', 'NUTANIX_HOST') }}"
+        username: "{{lookup('env', 'NUTANIX_USERNAME') }}"
+        password: "{{lookup('env', 'NUTANIX_PASSWORD') }}"
+        recovery_site_ip: "{{lookup('env', 'NUTANIX_DR_SITE')}}"
+        validate_certs: false
+    - name: Insert credentials block to vars
+      blockinfile:
+        path: ../vars/main.yml
+        marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 0"
+        block: |
+          ip: "{{lookup('env', 'NUTANIX_HOST') }}"
+          username: "{{lookup('env', 'NUTANIX_USERNAME') }}"
+          password: "{{lookup('env', 'NUTANIX_PASSWORD') }}"
+          recovery_site_ip: "{{lookup('env', 'NUTANIX_DR_SITE') }}"
+          validate_certs: false
+    - name: create external subnet with NAT
+      ntnx_subnets:
+        state: present
+        nutanix_host: "{{ ip }}"
+        validate_certs: false
+        nutanix_username: "{{ username }}"
+        nutanix_password: "{{ password }}"
+        name: "{{external_nat_subnets.name}}"
+        external_subnet:
+          vlan_id: "{{external_nat_subnets.vlan_id}}"
+          enable_nat: True
+          cluster:
+            name: "{{ cluster.name }}"
+          ipam:
+            network_ip: "{{ external_nat_subnets.network_ip }}"
+            network_prefix: "{{ external_nat_subnets.network_prefix }}"
+            gateway_ip: "{{ external_nat_subnets.gateway_ip_address }}"
+            ip_pools:
+              - start_ip: "{{ external_nat_subnets.dhcp.start_address }}"
+                end_ip: "{{ external_nat_subnets.dhcp.end_address }}"
+      register: result
+    - name: Insert external subnet configuration block to vars
+      blockinfile:
+        path: ../vars/main.yml
+        marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 1"
+        block: |
+          external_nat_subnet:
+            name: "{{external_nat_subnets.name}}"
+            uuid: "{{result.subnet_uuid}}"
 
-# - name: Create min VPC with external_subnet uuid
-#   ntnx_vpcs:
-#     validate_certs: False
-#     state: present
-#     wait: true
-#     nutanix_host: "{{ ip }}"
-#     nutanix_username: "{{ username }}"
-#     nutanix_password: "{{ password }}"
-#     name: "{{vpc_name}}"
-#     external_subnets:
-#       - subnet_uuid: "{{ result.subnet_uuid }}"
-#   register: result
+    - name: Create min VPC with external_subnet uuid
+      ntnx_vpcs:
+        validate_certs: False
+        state: present
+        wait: true
+        nutanix_host: "{{ ip }}"
+        nutanix_username: "{{ username }}"
+        nutanix_password: "{{ password }}"
+        name: "{{vpc_name}}"
+        external_subnets:
+          - subnet_uuid: "{{ result.subnet_uuid }}"
+      register: result
 
-# - name: Insert VPC configuration block to vars
-#   blockinfile:
-#     path: ../vars/main.yml
-#     marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 2"
-#     block: |
-#       vpc:
-#         name: "{{vpc_name}}"
-#         uuid: "{{result.vpc_uuid}}"
+    - name: Insert VPC configuration block to vars
+      blockinfile:
+        path: ../vars/main.yml
+        marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 2"
+        block: |
+          vpc:
+            name: "{{vpc_name}}"
+            uuid: "{{result.vpc_uuid}}"
 
-# - name: create vlan subnet with IPAM
-#   ntnx_subnets:
-#     state: present
-#     nutanix_host: "{{ ip }}"
-#     wait: true
-#     validate_certs: false
-#     nutanix_username: "{{ username }}"
-#     nutanix_password: "{{ password }}"
-#     name: "{{static_subnet_name}}"
-#     vlan_subnet:
-#       vlan_id: 373
-#       virtual_switch:
-#         name: vs0
-#       cluster:
-#         name: "{{ cluster.name }}"
-#       ipam:
-#         network_ip: 10.30.30.0
-#         network_prefix: 24
-#         gateway_ip: 10.30.30.254
-#         ip_pools:
-#           - start_ip: 10.30.30.10
-#             end_ip: 10.30.30.90
-#   register: result
+    - name: create vlan subnet with IPAM
+      ntnx_subnets:
+        state: present
+        nutanix_host: "{{ ip }}"
+        wait: true
+        validate_certs: false
+        nutanix_username: "{{ username }}"
+        nutanix_password: "{{ password }}"
+        name: "{{static_subnet_name}}"
+        vlan_subnet:
+          vlan_id: 373
+          virtual_switch:
+            name: vs0
+          cluster:
+            name: "{{ cluster.name }}"
+          ipam:
+            network_ip: 10.30.30.0
+            network_prefix: 24
+            gateway_ip: 10.30.30.254
+            ip_pools:
+              - start_ip: 10.30.30.10
+                end_ip: 10.30.30.90
+      register: result
 
-# - name: Insert vlan subnet configuration block to var file
-#   blockinfile:
-#     path: ../vars/main.yml
-#     marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 3"
-#     block: |
-#       static:
-#         name: "{{static_subnet_name}}"
-#         uuid: "{{result.subnet_uuid}}"
-#         network_ip: 10.30.30.0
-#         network_prefix: 24
-#         gateway_ip: 10.30.30.254
+    - name: Insert vlan subnet configuration block to var file
+      blockinfile:
+        path: ../vars/main.yml
+        marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 3"
+        block: |
+          static:
+            name: "{{static_subnet_name}}"
+            uuid: "{{result.subnet_uuid}}"
+            network_ip: 10.30.30.0
+            network_prefix: 24
+            gateway_ip: 10.30.30.254
 
-# - name: include var file
-#   include_vars: ../vars/main.yml
-# - name: create overlay Subnet with minimum requirements
-#   ntnx_subnets:
-#     state: present
-#     nutanix_host: "{{ ip }}"
-#     validate_certs: false
-#     nutanix_username: "{{ username }}"
-#     nutanix_password: "{{ password }}"
-#     name: "{{overlay_subnet.name}}"
-#     overlay_subnet:
-#       vpc:
-#         uuid: "{{ vpc.uuid }}"
-#       ipam:
-#         network_ip: "{{overlay_subnet.network_ip}}"
-#         network_prefix: "{{overlay_subnet.network_prefix}}"
-#         gateway_ip: "{{overlay_subnet.gateway_ip}}"
-#   register: result
+    - name: include var file
+      include_vars: ../vars/main.yml
+    - name: create overlay Subnet with minimum requirements
+      ntnx_subnets:
+        state: present
+        nutanix_host: "{{ ip }}"
+        validate_certs: false
+        nutanix_username: "{{ username }}"
+        nutanix_password: "{{ password }}"
+        name: "{{overlay_subnet.name}}"
+        overlay_subnet:
+          vpc:
+            uuid: "{{ vpc.uuid }}"
+          ipam:
+            network_ip: "{{overlay_subnet.network_ip}}"
+            network_prefix: "{{overlay_subnet.network_prefix}}"
+            gateway_ip: "{{overlay_subnet.gateway_ip}}"
+      register: result
 
-# - name: Insert overlay subnet configuration block to var file
-#   blockinfile:
-#     path: ../vars/main.yml
-#     marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 4"
-#     block: |
-#       overlay:
-#         name: "{{overlay_subnet.name}}"
-#         uuid: "{{result.subnet_uuid}}"
+    - name: Insert overlay subnet configuration block to var file
+      blockinfile:
+        path: ../vars/main.yml
+        marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 4"
+        block: |
+          overlay:
+            name: "{{overlay_subnet.name}}"
+            uuid: "{{result.subnet_uuid}}"
 
-# - name: create VM with overlay subnet
-#   ntnx_vms:
-#     state: present
-#     nutanix_host: "{{ ip }}"
-#     nutanix_username: "{{ username }}"
-#     nutanix_password: "{{ password }}"
-#     validate_certs: False
-#     name: "{{vm_name}}"
-#     cluster:
-#       uuid: "{{ cluster.uuid }}"
-#     networks:
-#       - is_connected: true
-#         subnet:
-#           name: "{{overlay_subnet.name}}"
-#         private_ip: "{{overlay_subnet.private_ip}}"
-#   register: result
+    - name: create VM with overlay subnet
+      ntnx_vms:
+        state: present
+        nutanix_host: "{{ ip }}"
+        nutanix_username: "{{ username }}"
+        nutanix_password: "{{ password }}"
+        validate_certs: False
+        name: "{{vm_name}}"
+        cluster:
+          uuid: "{{ cluster.uuid }}"
+        networks:
+          - is_connected: true
+            subnet:
+              name: "{{overlay_subnet.name}}"
+            private_ip: "{{overlay_subnet.private_ip}}"
+      register: result
 
-# - name: Insert vm configuration block to var file
-#   blockinfile:
-#     path: ../vars/main.yml
-#     marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 5"
-#     block: |
-#       vm:
-#         name: "{{vm_name}}"
-#         uuid: "{{result.vm_uuid}}"
+    - name: Insert vm configuration block to var file
+      blockinfile:
+        path: ../vars/main.yml
+        marker: "# {mark} ANSIBLE MANAGED BLOCK insertion 5"
+        block: |
+          vm:
+            name: "{{vm_name}}"
+            uuid: "{{result.vm_uuid}}"
 
 # # - name: create VM with static subnet for dr tests
 # #   ntnx_vms:
@@ -196,16 +198,17 @@
 # #       name: "{{dr_vm_name}}"
 # #       uuid: "{{result.vm_uuid}}"
 
-# - name: Downloading disk image for image related tests
-#   get_url:
-#     url: "{{ disk_image.url }}"
-#     dest: "{{ disk_image.dest }}"
+    - name: Downloading disk image for image related tests
+      get_url:
+        url: "{{ disk_image.url }}"
+        dest: "{{ disk_image.dest }}"
 
-# # - name: create address group for network security policy related tests
-# #   ntnx_address_groups:
-# #     state: present
-# #     name: dest
-# #     desc: dest
-# #     subnets:
-# #       - network_ip: "10.1.1.0"
-# #         network_prefix: 24
\ No newline at end of file
+    # - name: create address group for network security policy related tests
+    #   ntnx_address_groups:
+    #     state: present
+    #     name: dest
+    #     desc: dest
+    #     subnets:
+    #       - network_ip: "10.1.1.0"
+    #         network_prefix: 24
+    #   ignore_errors: true
\ No newline at end of file
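
Note on the pattern the re-enabled prepare_env.yml tasks use: each provisioning task registers its module result, and a follow-up blockinfile task appends the new resource's name and UUID to ../vars/main.yml under its own marker, so later test targets (and cleanup.yml) can read them back via include_vars. A minimal sketch of that round-trip, assuming the same vars file layout as above; the demo_subnet key and demo_subnet marker are illustrative, not part of the patch:

---
# Sketch: create a resource, then persist its UUID for later test stages.
- hosts: localhost
  gather_facts: false
  collections:
    - nutanix.ncp
  tasks:
    - name: include var file
      include_vars: ../vars/main.yml

    - name: create vlan subnet with IPAM
      ntnx_subnets:
        state: present
        nutanix_host: "{{ ip }}"
        nutanix_username: "{{ username }}"
        nutanix_password: "{{ password }}"
        validate_certs: false
        name: "{{ static_subnet_name }}"
        vlan_subnet:
          vlan_id: 373
          virtual_switch:
            name: vs0
          cluster:
            name: "{{ cluster.name }}"
      register: result

    - name: persist the subnet UUID so other targets can consume it
      blockinfile:
        path: ../vars/main.yml
        # each insertion gets a unique marker, so blockinfile manages the
        # blocks independently instead of overwriting one another
        marker: "# {mark} ANSIBLE MANAGED BLOCK demo_subnet"  # illustrative marker
        block: |
          demo_subnet:
            name: "{{ static_subnet_name }}"
            uuid: "{{ result.subnet_uuid }}"

The distinct markers ("insertion 0" through "insertion 5" in the patch) are what let several managed blocks coexist in one vars file.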
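The cleanup side is deliberately best-effort: every delete in cleanup.yml now carries ignore_errors: true, so a resource that was never created (for example, when prepare_env.yml aborted midway) no longer stops the rest of the teardown. A condensed sketch of that behaviour, using the vars persisted above:

---
# Sketch: teardown continues past individual failures.
- hosts: localhost
  gather_facts: false
  collections:
    - nutanix.ncp
  tasks:
    - name: include var file
      include_vars: ../vars/main.yml

    - name: Delete subnets
      ignore_errors: true  # a missing subnet must not abort the loop or the play
      ntnx_subnets:
        state: absent
        nutanix_host: "{{ ip }}"
        nutanix_username: "{{ username }}"
        nutanix_password: "{{ password }}"
        validate_certs: false
        subnet_uuid: "{{ item }}"
      loop:
        - "{{ external_nat_subnet.uuid }}"
        - "{{ static.uuid }}"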