
Issue #517: Test automation script for Component Role

Signed-off-by: VishnupriyaKrish <Vishnupriya_Krishnar@Dellteam.com>
VishnupriyaKrish 3 years ago
parent
commit
48208e3f1b
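This commit adds an end-to-end test playbook (test/test_omnia_1.1.yml), a cluster validation playbook (test/test_omnia_validation.yml), and their supporting variable files under test/test_vars/. A minimal usage sketch, assuming the playbooks are run from the test directory of an already-deployed Omnia repository (the commit itself does not document the invocation):

    # run a single test case by its tag
    ansible-playbook test_omnia_1.1.yml --tags TC_001

    # run only the cluster validation checks against the existing inventory
    ansible-playbook test_omnia_validation.yml -i ../inventory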

+ 2 - 1
.all-contributorsrc

@@ -18,7 +18,8 @@
         "ideas",
         "maintenance",
         "mentoring",
-        "design"
+        "design",
+        "review"
       ]
     },
     {

File diff suppressed because it is too large
+ 1 - 1
README.md


+ 901 - 0
test/test_omnia_1.1.yml

@@ -0,0 +1,901 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+#Testcase OMNIA_1.1_US_CRD_TC_001
+# Execute omnia.yml with separate servers for the manager, compute, login, and nfs nodes with default parameters
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: localhost
+
+  tasks:
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_001
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i ../inventory
+      changed_when: false
+      tags: TC_001
+
+#Testcase OMNIA_1.1_US_CRD_TC_005
+# Execute omnia.yml with the addition of a new compute node
+- name: OMNIA_1.1_US_CRD_TC_005
+  hosts: localhost
+
+  tasks:
+  
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_005
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i ../inventory
+      changed_when: false
+      tags: TC_005
+
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_005
+
+    - name: Creating test inventory file 
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+        
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }}          
+          {{ host5 }}
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+      tags: TC_005
+
+    - name: Verify if new compute node is added
+      command: ansible --list-hosts compute -i test_inventory.yml
+      changed_when: false
+      register: compute_info
+      tags: TC_005
+
+    - name: Validate compute node
+      assert:
+         that: 
+           - "'{{ host5 }}' in compute_info.stdout"
+         success_msg: "{{ compute_node_success_msg }}"
+         fail_msg: "{{ compute_node_fail_msg }}"
+      tags: TC_005
+
+#Testcase OMNIA_1.1_US_CRD_TC_006
+# Execute omnia.yml after removal of the new compute node
+- name: OMNIA_1.1_US_CRD_TC_006
+  hosts: localhost
+
+  tasks:
+
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_006
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i test_inventory.yml
+      changed_when: false
+      tags: TC_006
+
+    - name: Delete one compute node
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }} 
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+      tags: TC_006
+       
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_006
+
+    - name: Verify if one compute node is deleted
+      command:  ansible --list-hosts compute -i test_inventory.yml
+      register: compute_info
+      changed_when: false
+      tags: TC_006
+
+    - name: Validate compute node
+      assert:
+         that: 
+           - "'{{ host5 }}' not in compute_info.stdout"
+         success_msg: "{{ compute_node_del_success_msg }}"
+         fail_msg: "{{ compute_node_del_fail_msg }}"
+      tags: TC_006
+
+    - name: Delete the inventory file
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_006
+
+#Testcase OMNIA_1.1_US_CRD_TC_008
+# Execute Jupyterhub.yml and then Kubeflow.yml
+- name: OMNIA_1.1_US_CRD_TC_008
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_008
+
+    - name: Install Jupyterhub
+      command: ansible-playbook platforms/jupyterhub.yml -i inventory
+      changed_when: false
+      args:
+        chdir: ../
+      tags: TC_008
+
+    - name: Install Kubeflow
+      command: ansible-playbook platforms/kubeflow.yml -i inventory
+      changed_when: false
+      args:
+        chdir: ../
+      tags: TC_008
+
+- name: OMNIA_1.1_US_CRD_TC_008
+  hosts: manager
+  vars_files:
+    - test_vars/test_jupyterhub_vars.yml
+    - test_vars/test_kubeflow_vars.yml
+
+  tasks:
+    - name: Waiting for the pods deployment
+      pause:
+        minutes: 5
+      tags: TC_008
+      
+    - name: Checking K8s services
+      command: kubectl get services
+      register: k8s_services
+      changed_when: false
+      failed_when: false
+      tags: TC_008
+
+    - name: Validating JupyterHub services
+      assert:
+        that:
+          - "'hub' in k8s_services.stdout"
+          - "'proxy-public' in k8s_services.stdout"
+          - "'proxy-api' in k8s_services.stdout"
+        fail_msg: "{{ jupyterhub_services_fail_msg }}"
+        success_msg: "{{ jupyterhub_services_success_msg }}"
+      tags: TC_008
+    
+    - name: Checking all running pods under jupyterhub namespace
+      command: kubectl get pods --namespace jupyterhub --field-selector=status.phase=Running
+      register: jupyterhub_running_pods
+      changed_when: false
+      failed_when: false
+      tags: TC_008
+
+    - name: Validating JupyterHub pods
+      assert:
+        that:
+          - "'hub' in default_jupyterhub_pods.stdout"
+          - "'proxy' in default_jupyterhub_pods.stdout"
+        fail_msg: "{{ jupyterhub_pods_fail_msg }}"
+        success_msg: "{{ jupyterhub_pods_success_msg }}"
+      tags: TC_008
+
+    - name: Checking installed Kubeflow version
+      command: kfctl version
+      register: kfctl_version
+      changed_when: false
+      failed_when: false
+      tags: TC_008
+
+    - name: Checking pods under kubeflow namespace
+      command: kubectl get pods --namespace kubeflow
+      register: kubeflow_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_008
+
+    - name: Checking pods under istio-system namespace
+      command: kubectl get pods --namespace istio-system
+      register: istio_system_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_008
+
+    - name: Validating Kubeflow Installation
+      assert:
+        that:
+          - "'command not found' not in kfctl_version.stdout"
+        fail_msg: "{{ kubeflow_install_fail_msg }}"
+        success_msg: "{{ kubeflow_install_success_msg }}"
+      tags: TC_008
+
+    - name: Validating Kubeflow pods deployment
+      assert:
+        that:
+          - "'Running' in kubeflow_pods.stdout or 'ContainerCreating' in kubeflow_pods.stdout"
+          - "'Running' in istio_system_pods.stdout or 'ContainerCreating' in istio_system_pods.stdout"
+        fail_msg: "{{ kubeflow_pods_deployment_fail_msg }}"
+        success_msg: "{{ kubeflow_pods_deployment_success_msg }}"
+      tags: TC_008
+
+#Testcase OMNIA_1.1_US_CRD_TC_009
+# Execute omnia.yml and reboot all the nodes
+- name: OMNIA_1.1_US_CRD_TC_009
+  hosts: localhost
+  vars_files:
+    - test_vars/test_k8s_common_vars.yml
+    - test_vars/test_slurm_common_vars.yml
+
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_009
+
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_009
+
+
+    - name: Reboot all the nodes
+      command: ansible all -i ../inventory -b -B 1 -P 0 -m shell -a "sleep {{ sleep_time }} && reboot"
+      changed_when: false
+      tags: TC_009
+
+    - name: Waiting for services to restart
+      pause:
+         minutes: "{{ pod_time }}"
+      tags: TC_009
+
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i ../inventory
+      changed_when: false
+      tags: TC_009
+
+
+# Testcase OMNIA_1.1_US_CRD_TC_002
+# Execute omnia.yml with single node scenario (manager, compute, and login node on the same server)
+- name: OMNIA_1.1_US_CRD_TC_002
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_002
+
+    - name: Creating test inventory file for single node scenario
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host1 }}  
+          
+          [login_node]
+          {{ host1 }}
+           
+          [nfs_node]
+      tags: TC_002
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_002
+
+    - name: Decrypt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_002
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_one }}"'
+      tags: TC_002
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_invalid }}"'
+      tags: TC_002
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: true'
+      tags: TC_002
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_002
+      
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      failed_when: false
+      changed_when: false
+      register: db_error
+      args: 
+        chdir: ../
+      tags: TC_002
+      
+    - name: Validate mariadb password error
+      assert:
+        that:
+          - '" mariadb_password not given in correct format" not in db_error.stdout'
+        fail_msg: "{{ mariadb_password_error_fail_msg }}"
+        success_msg: "{{ mariadb_password_error_success_msg }}"
+      tags: TC_002
+
+    - name: Delete the inventory file
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_002
+
+# Testcase OMNIA_1.1_US_CRD_TC_003
+# Execute omnia.yml with single node scenario (manager, compute, login, and nfs node on the same server)
+- name: OMNIA_1.1_US_CRD_TC_003
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_003
+
+    - name: Creating inventory file for single node scenario
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host1 }}  
+          
+          [login_node]
+          {{ host1 }}
+
+          [nfs_node]
+      tags: TC_003
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_003
+
+    - name: Decrypt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_003
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_one }}"'
+      tags: TC_003
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_invalid }}"'
+      tags: TC_003
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: true'
+      tags: TC_003
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_003
+      
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      failed_when: false
+      changed_when: false
+      register: db_error
+      args: 
+        chdir: ../
+      tags: TC_003
+      
+    - name: Validate mariadb password error
+      assert:
+        that:
+          - '" mariadb_password not given in correct format" not in db_error.stdout'
+        fail_msg: "{{ mariadb_password_error_fail_msg }}"
+        success_msg: "{{ mariadb_password_error_success_msg }}"
+      tags: TC_003
+
+    - name: Delete the inventory file
+      changed_when: false
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_003
+
+#Testcase OMNIA_1.1_US_CRD_TC_004
+# Execute omnia.yml with separate servers for the manager, compute, login, and nfs nodes on a new kubernetes version
+- name: OMNIA_1.1_US_CRD_TC_004
+  hosts: localhost
+
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_004
+
+    - name: Creating test inventory
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }}  
+          
+          [login_node]
+          {{ host3 }}
+         
+          [nfs_node]
+
+      tags: TC_004
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_004
+
+    - name: Decrypt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_two }}"'
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_complex }}"'
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: true'
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_default }}"'
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_version: ""'
+        replace: 'k8s_version: "{{ k8s_new_version }}"'
+      tags: TC_004
+
+    
+    - name: Execute omnia.yml 
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_004
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i test_inventory.yml
+      changed_when: false
+      tags: TC_004
+
+    - name: Delete the inventory file
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_004
+
+#Testcase OMNIA_1.1_US_CRD_TC_007
+# Execute omnia.yml after redeploying the cluster
+- name: OMNIA_1.1_US_CRD_TC_007
+  hosts: localhost
+
+  tasks:
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_007
+
+    - name: Re-execute omnia.yml
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_007
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i ../inventory
+      changed_when: false
+      tags: TC_007
+
+# Testcase OMNIA_1.1_US_CRD_TC_010
+# Execute omnia.yml with the same server for manager and compute, installing slurm first and kubernetes later
+- name: OMNIA_1.1_US_CRD_TC_010
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_010
+
+    - name: Creating test inventory file
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host1 }}  
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+      tags: TC_010
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_010
+
+    - name: Decrypt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_one }}"'
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_default }}"'
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: true'
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'ipa_admin_password: ""'
+        replace: 'ipa_admin_password: "{{ ipa_passwd_default }}"' 
+      tags: TC_010
+      
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags kubernetes
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_010
+
+    - name: Re-execute omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags slurm,freeipa
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_010
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i test_inventory.yml
+      changed_when: false
+      tags: TC_010
+
+
+
+# Testcase OMNIA_1.1_US_CRD_TC_011
+# Execute omnia.yml with separate servers for the manager, compute, login, and nfs nodes, installing slurm first and kubernetes later
+- name: OMNIA_1.1_US_CRD_TC_011
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_011
+
+    - name: Creating test inventory file
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }}  
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+          {{ host4 }}
+      tags: TC_011
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_011
+
+    - name: Decrypt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_one }}"'
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_default }}"'
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required: '
+        replace: 'login_node_required: true'
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'ipa_admin_password: ""'
+        replace: 'ipa_admin_password: "{{ ipa_passwd_complex }}"'
+      tags: TC_011
+      
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags kubernetes
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_011
+    
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags slurm,freeipa
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_011
+
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i test_inventory.yml
+      changed_when: false
+      tags: TC_011
+
+    - name: Delete the inventory file
+      changed_when: false
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_011
+
+# Testcase OMNIA_1.1_US_CRD_TC_012
+# Execute omnia.yml (slurm only) with an invalid ipa_admin_password and validate that it is rejected
+- name: OMNIA_1.1_US_CRD_TC_012
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_012
+
+    - name: Creating test inventory file 
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }}  
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+      tags: TC_012
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_012
+
+    - name: Decrypt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_two }}"'
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_default }}"'
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: false'
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'ipa_admin_password: ""'
+        replace: 'ipa_admin_password: "{{ ipa_passwd_invalid }}"'
+      tags: TC_012
+      
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags kubernetes
+      failed_when: false
+      changed_when: false
+      register: ipa_error
+      args: 
+        chdir: ../
+      tags: TC_012
+      
+    - name: Validate ipa admin password error
+      assert:
+        that:
+          - '" Incorrect format provided for ipa_admin_password" not in ipa_error.stdout'
+        fail_msg: "{{ ipa_password_error_fail_msg }}"
+        success_msg: "{{ ipa_password_error_success_msg }}"
+      tags: TC_012
+
+    - name: Delete the inventory file
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_012

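For reference, the inventory files generated by the copy tasks above are plain INI-style inventories. With the sample host values defined in test/test_vars/test_omnia_1.1_vars.yml, the TC_005 inventory would render roughly as follows (illustrative only; the addresses come from the vars file):

    [manager]
    192.168.0.17

    [compute]
    192.168.0.19
    192.168.0.18

    [login_node]
    192.168.0.16

    [nfs_node]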
+ 468 - 0
test/test_omnia_validation.yml

@@ -0,0 +1,468 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+# Testcase OMNIA_1.1_US_CRD_TC_001
+# Execute omnia.yml with separate servers for the manager, compute, login, and nfs nodes with default parameters
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: manager, compute
+  vars_files:
+    - test_vars/test_k8s_common_vars.yml
+    - test_vars/test_slurm_common_vars.yml
+  tasks:
+    - name: Checking K8s service status
+      systemd:
+        name: kubelet
+      register: kubelet_service
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating K8s service status
+      assert:
+        that:
+          - kubelet_service.status.ActiveState == 'active'
+        fail_msg: "{{ kubelet_service_fail_msg }}"
+        success_msg: "{{ kubelet_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking munge service status
+      systemd:
+        name: munge
+      register: munge_service
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating munge service status
+      assert:
+        that:
+          - munge_service.status.ActiveState == 'active'
+        fail_msg: "{{ munge_service_fail_msg }}"
+        success_msg: "{{ munge_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: manager
+  vars_files:
+    - test_vars/test_k8s_start_manager_workers_vars.yml
+    - test_vars/test_k8s_start_services_vars.yml
+    - test_vars/test_slurmexporter_vars.yml
+    - test_vars/test_slurm_start_services_vars.yml
+    - test_vars/test_login_server_vars.yml
+    - test_vars/test_slurm_manager_vars.yml
+    - test_vars/test_login_node_vars.yml
+
+  tasks:      
+    - name: Checking kube-system pods
+      command: kubectl get pods --namespace kube-system --field-selector=status.phase=Running
+      register: kube_system_pods
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating controller-manager and scheduler pods status
+      assert:
+        that:
+          - "'kube-scheduler' in kube_system_pods.stdout"
+          - "'kube-controller' in kube_system_pods.stdout"
+        fail_msg: "{{ controller_scheduler_status_fail_msg }}"
+        success_msg: "{{ controller_scheduler_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating coreDNS pods status
+      assert:
+        that:
+          - "'coredns' in kube_system_pods.stdout"
+        fail_msg: "{{ coredns_status_fail_msg }}"
+        success_msg: "{{ coredns_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking all running pods
+      command: kubectl get pods --all-namespaces --field-selector=status.phase=Running
+      register: running_pods
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating Metallb, Prometheus and MPI pods
+      assert:
+        that:
+          - "'metallb' in running_pods.stdout"
+          - "'prometheus' in running_pods.stdout"
+          - "'mpi-operator' in running_pods.stdout"
+        fail_msg: "{{ metallb_prometheus_mpi_pods_fail_msg }}"
+        success_msg: "{{ metallb_prometheus_mpi_pods_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating K8s dashboard
+      assert:
+        that:
+          - "'kubernetes-dashboard' in running_pods.stdout"
+        fail_msg: "{{ kubernetes_dashboard_fail_msg }}"
+        success_msg: "{{ kubernetes_dashboard_success_msg }}"
+      tags: VERIFY_OMNIA_01  
+    
+    - name: Verify slurm exporter status
+      systemd:
+        name: prometheus-slurm-exporter
+      register: slurm_exporter_status
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm exporter service status
+      assert:
+        that:
+          - slurm_exporter_status.status.ActiveState == 'active'
+        fail_msg: "{{ slurm_exporter_service_fail_msg }}"
+        success_msg: "{{ slurm_exporter_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Get prometheus-server pod name
+      shell: >-
+        kubectl get pods --namespace default -l "app=prometheus,component=server"
+        -o jsonpath="{.items[0].metadata.name}"
+      register: pod_name
+      changed_when: false
+      failed_when: false
+      tags: VERIFY_OMNIA_01
+
+    - name: Check if prometheus-server is in running state
+      command: kubectl get pods {{ pod_name.stdout }}
+      register: slurm_exporter_pod_status
+      ignore_errors: yes
+      changed_when: false
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm exporter job in k8s services
+      assert:
+        that:
+          - "'Error from server' not in slurm_exporter_pod_status.stdout"
+        fail_msg: "{{ slurm_exporter_job_fail_msg }}"
+        success_msg: "{{ slurm_exporter_job_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking mariadb service status
+      systemd:
+        name: mariadb
+      register: mariadb_service
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating mariadb service status
+      assert:
+        that:
+          - mariadb_service.status.ActiveState == 'active'
+        fail_msg: "{{ mariadb_service_fail_msg }}"
+        success_msg: "{{ mariadb_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking slurmctld service status
+      systemd:
+        name: slurmctld
+      register: slurmctld_service
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking slurmdbd service status
+      systemd:
+        name: slurmdbd
+      register: slurmdbd_service
+      tags: VERIFY_OMNIA_01
+
+    - name: Check if slurm is installed
+      command: sinfo -V
+      register: slurm_version
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating slurmctld service status
+      assert:
+        that:
+          - slurmctld_service.status.ActiveState == 'active'
+        fail_msg: "{{ slurmctld_service_fail_msg }}"
+        success_msg: "{{ slurmctld_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating slurmdbd service status
+      assert:
+        that:
+          - slurmdbd_service.status.ActiveState == 'active'
+        fail_msg: "{{ slurmdbd_service_fail_msg }}"
+        success_msg: "{{ slurmdbd_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm installation
+      assert:
+        that: "'command not found' not in slurm_version.stdout"
+        fail_msg: "{{ slurm_status_fail_msg }}"
+        success_msg: "{{ slurm_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Submit kubernetes job
+      command: kubectl run nginx --image=nginx --restart=Never
+      changed_when: false
+      failed_when: false
+      tags: VERIFY_OMNIA_01
+
+    - name: Check submitted kubernetes job status
+      command: kubectl get pod nginx
+      register: kubo_job
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate kubernetes job submission
+      assert:
+        that: "'pods nginx not found' not in kubo_job.stdout"
+        fail_msg: "{{ kubernetes_job_status_fail_msg }}"
+        success_msg: "{{ kubernetes_job_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+         
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: compute
+  vars_files:
+    - test_vars/test_slurm_workers_vars.yml
+  tasks:    
+    - name: Check if slurm is installed
+      command: sinfo -V
+      register: slurm_version
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking slurmd service status
+      service:
+        name: slurmd.service
+      register: slurmd_service
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm installation
+      assert:
+        that: "'command not found' not in slurm_version.stdout"
+        fail_msg: "{{ slurm_status_fail_msg }}"
+        success_msg: "{{ slurm_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating slurmd service status
+      assert:
+        that:
+          - slurmd_service.status.ActiveState == 'active'
+        fail_msg: "{{ slurmd_service_fail_msg }}"
+        success_msg: "{{ slurmd_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: manager, login_node
+  vars_files:
+    - test_vars/test_login_common_vars.yml
+    
+  tasks:    
+    - name: Checking installed Freeipa version
+      command: ipa --version
+      register: ipa_version
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating Freeipa Installation
+      assert:
+        that:
+          - "'command not found' not in ipa_version.stdout"
+        fail_msg: "{{ ipa_install_fail_msg }}"
+        success_msg: "{{ ipa_install_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Start and enable firewalld
+      service:
+        name: firewalld
+        state: started
+        enabled: yes
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking firewalld open ports on manager/login node
+      command: firewall-cmd --list-ports
+      changed_when: false
+      register: login_common_firewalld_ports
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating firewalld open ports on manager/login node
+      assert:
+        that:
+          - "'80/tcp' in login_common_firewalld_ports.stdout"
+          - "'443/tcp' in login_common_firewalld_ports.stdout"
+          - "'389/tcp' in login_common_firewalld_ports.stdout"
+          - "'636/tcp' in login_common_firewalld_ports.stdout"
+          - "'88/tcp' in login_common_firewalld_ports.stdout"
+          - "'464/tcp' in login_common_firewalld_ports.stdout"
+          - "'88/udp' in login_common_firewalld_ports.stdout"
+          - "'464/udp' in login_common_firewalld_ports.stdout"
+          - "'53/tcp' in login_common_firewalld_ports.stdout"
+          - "'53/udp' in login_common_firewalld_ports.stdout"
+          - "'123/udp' in login_common_firewalld_ports.stdout"
+          - "'7389/tcp' in login_common_firewalld_ports.stdout"
+        fail_msg: "{{ login_common_ports_status_fail_msg }}"
+        success_msg: "{{ login_common_ports_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Stop and disable firewalld
+      service:
+        name: firewalld
+        state: stopped
+        enabled: no
+      tags: VERIFY_OMNIA_01
+
+    - name: Check Freeipa server/client configuration
+      command: ipa help topics
+      register: ipa_config
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating Freeipa server/client Configuration
+      assert:
+        that:
+          - "'command not found' not in ipa_config.stdout"
+        fail_msg: "{{ ipa_configuration_fail_msg }}"
+        success_msg: "{{ ipa_configuration_success_msg }}"
+      failed_when: false
+      tags: VERIFY_OMNIA_01
+
+    - name: Ensure host is present
+      shell: echo "{{ ipa_admin_password }}" | kinit admin
+      register: authen
+      changed_when: false
+      ignore_errors: true
+      tags: VERIFY_OMNIA_01
+   
+    - name: Validate admin user in ipa server/client
+      assert:
+        that:
+          - authen.rc == 0
+        fail_msg: "{{ admin_user_authentication_status_fail_msg }}"
+        success_msg: "{{ admin_user_authentication_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: login_node
+  gather_facts: false
+  vars_files:
+    - test_vars/test_login_node_vars.yml
+    - test_vars/test_slurm_workers_vars.yml
+    
+  tasks: 
+    - name: Checking slurmd service status
+      service:
+        name: slurmd.service
+      register: slurmd_service
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating slurmd service status
+      assert:
+        that:
+          - slurmd_service.status.ActiveState == 'active'
+        fail_msg: "{{ slurmd_service_fail_msg }}"
+        success_msg: "{{ slurmd_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Submit slurm jobs
+      command: srun --nodes "{{ nodes }}" --ntasks-per-node "{{ ntasks }}" --partition normal hostname
+      register: job_status
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm job submission
+      assert:
+        that: "'compute.ipa.test' in job_status.stdout"
+        fail_msg: "{{ slurm_job_status_fail_msg }}"
+        success_msg: "{{ slurm_job_status_success_msg }}"
+      failed_when: false
+      tags: VERIFY_OMNIA_01
+
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: nfs_node
+  vars_files:
+    - test_vars/test_nfs_node_vars.yml
+   
+  tasks:
+      
+    - name: Checking rpcbind service status
+      systemd:
+        name: rpcbind
+      register: rpcbind_service
+      tags: VERIFY_OMNIA_01
+     
+    - name: Validating rpcbind service status
+      assert:
+        that:
+          - rpcbind_service.status.ActiveState == 'active'
+        fail_msg: "{{ rpcbind_service_fail_msg }}"
+        success_msg: "{{ rpcbind_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking nfs-server service status
+      systemd:
+        name: nfs-server
+      register: nfs_server_service
+      tags: VERIFY_OMNIA_01
+     
+    - name: Validating nfs-server service status
+      assert:
+        that:
+          - nfs_server_service.status.ActiveState == 'active'
+        fail_msg: "{{ nfs_server_service_fail_msg }}"
+        success_msg: "{{ nfs_server_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking nfs-lock service status
+      systemd:
+        name: nfs-lock
+      register: nfs_lock_service
+      tags: VERIFY_OMNIA_01
+     
+    - name: Validating nfs-lock service status
+      assert:
+        that:
+          - nfs_lock_service.status.ActiveState == 'active'
+        fail_msg: "{{ nfs_lock_service_fail_msg }}"
+        success_msg: "{{ nfs_lock_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking nfs-idmap service status
+      systemd:
+        name: nfs-idmap
+      register: nfs_idmap_service
+      tags: VERIFY_OMNIA_01
+     
+    - name: Validating nfs-idmap service status
+      assert:
+        that:
+          - nfs_idmap_service.status.ActiveState == 'active'
+        fail_msg: "{{ nfs_idmap_service_fail_msg }}"
+        success_msg: "{{ nfs_idmap_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Check if nfs server setup is complete
+      command: exportfs -v
+      changed_when: false
+      register: nfs_share
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validate nfs server setup
+      assert:
+        that: "'{{ nfs_dir }}' in nfs_share.stdout"
+        fail_msg: "{{ nfs_server_fail_msg }}"
+        success_msg: "{{ nfs_server_success_msg }}"
+      tags: VERIFY_OMNIA_01      

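The service checks in this playbook read ActiveState from the status dictionary returned by the systemd/service modules. A quick manual spot check on any node, assuming shell access (illustrative, not part of the commit):

    systemctl is-active kubelet      # prints "active" when the service is running
    systemctl is-active slurmctld
    systemctl is-active munge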
+ 28 - 0
test/test_vars/test_login_common_vars.yml

@@ -0,0 +1,28 @@
+
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+ipa_admin_password: "omnia@1234"
+login_common_ports_status_fail_msg: "TCP/UDP ports are not open on the manager/login node"
+login_common_ports_status_success_msg: "TCP/UDP ports are open on the manager/login node"
+
+ipa_install_fail_msg: "FreeIPA is not installed"
+ipa_install_success_msg: "FreeIPA is installed"
+
+ipa_configuration_fail_msg: "FreeIPA is not configured properly"
+ipa_configuration_success_msg: "FreeIPA is configured properly"
+
+admin_user_authentication_status_fail_msg: "Admin user denied access"
+admin_user_authentication_status_success_msg: "Admin user successfully authenticated" 

+ 31 - 0
test/test_vars/test_login_node_vars.yml

@@ -0,0 +1,31 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+ipa_client_packages:
+  - bind-utils
+  - freeipa-client
+  - ipa-admintools
+
+freeipa_client_packages_status_success_msg: "Freeipa-client packages are installed"
+freeipa_client_packages_status_fail_msg: "Freeipa-client packages are not installed"
+
+nodes: "1"
+ntasks: "1"
+
+slurm_job_status_fail_msg: "Slurm jobs execution failed"
+slurm_job_status_success_msg: "Slurm jobs executed and running successfully"
+
+kubernetes_job_status_fail_msg: "Kubernetes job failed"
+kubernetes_job_status_success_msg: "Kubernetes job is running successfully"

+ 25 - 0
test/test_vars/test_login_server_vars.yml

@@ -0,0 +1,25 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+ipa_server_packages:
+  - bind
+  - bind-dyndb-ldap
+  - ipa-server-dns
+  - freeipa-server
+
+
+freeipa_server_packages_status_success_msg: "Freeipa-server packages are installed"
+freeipa_server_packages_status_fail_msg: "Freeipa-server packages are not installed"
+

+ 34 - 0
test/test_vars/test_nfs_node_vars.yml

@@ -0,0 +1,34 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+nfs_dir: "/me4_k8s_nfs"
+
+nfs_dir_fail_msg: "nfs share directory is not present"
+nfs_dir_success_msg: "nfs share directory is present"
+
+rpcbind_service_fail_msg: "Rpcbind service is not running"
+rpcbind_service_success_msg: "Rpcbind service is running"
+
+nfs_server_service_fail_msg: "nfs-server service is not running"
+nfs_server_service_success_msg: "nfs-server service is running"
+
+nfs_lock_service_fail_msg: "nfs-lock service is not running"
+nfs_lock_service_success_msg: "nfs-lock service is running"
+
+nfs_idmap_service_fail_msg: "nfs-idmap service is not running"
+nfs_idmap_service_success_msg: "nfs-idmap service is running"
+
+nfs_server_success_msg: "nfs server is setup successfully"
+nfs_server_fail_msg: "nfs server setup is unsuccessful"

+ 50 - 0
test/test_vars/test_omnia_1.1_vars.yml

@@ -0,0 +1,50 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+host1: 192.168.0.17
+host2: 192.168.0.19
+host3: 192.168.0.16
+host4: 192.168.0.22
+host5: 192.168.0.18
+
+config_filename: "omnia_config.yml"
+config_vaultname: ".omnia_vault_key"
+
+file_permission: "0644"
+db_passwd_invalid: "omnia123-"
+db_passwd_complex: "omnIaFD@123gn)opk"
+db_passwd_default: "password"
+k8s_cni_one: "flannel"
+k8s_cni_two: "calico"
+k8s_pod_network_cidr_default: "10.244.0.0/16"
+k8s_pod_network_cidr_other: "192.168.0.0/16"
+k8s_new_version: "1.19.3"
+ipa_passwd_invalid: "Omnia12-3"
+ipa_passwd_default: "omnia1234"
+ipa_passwd_complex: "Omnia@De9$123%"
+sleep_time: 5
+pod_time: 10
+
+compute_node_success_msg: "New compute node is successfully added to the cluster"
+compute_node_fail_msg: "New compute node failed to be added to the cluster"
+
+compute_node_del_success_msg: "New compute node is successfully deleted from the cluster"
+compute_node_del_fail_msg: "New compute node failed to be deleted from the cluster"
+
+ipa_password_error_fail_msg: "Invalid ipa_admin_password was not rejected"
+ipa_password_error_success_msg: "Invalid ipa_admin_password was rejected as expected"
+
+mariadb_password_error_fail_msg: "Invalid mariadb_password was not rejected"
+mariadb_password_error_success_msg: "Invalid mariadb_password was rejected as expected"