
Merge branch 'devel' into nfs_node

blesson-james 3 years ago
parent
commit
07e00ecbfd
40 changed files with 4115 additions and 54 deletions
  1. .all-contributorsrc (+4 -2)
  2. .github/workflows/ansible-lint.yml (+3 -3)
  3. README.md (+2 -2)
  4. control_plane/input_params/powervault_me4_vars.yml (+16 -8)
  5. control_plane/roles/control_plane_common/tasks/count_component_roles.yml (+37 -0)
  6. control_plane/roles/control_plane_common/tasks/main.yml (+9 -1)
  7. control_plane/roles/control_plane_common/tasks/validate_device_mapping_file.yml (+82 -0)
  8. control_plane/roles/control_plane_common/tasks/validate_host_mapping_file.yml (+179 -0)
  9. control_plane/roles/control_plane_common/vars/main.yml (+30 -0)
  10. control_plane/roles/powervault_me4/tasks/map_volume.yml (+14 -6)
  11. control_plane/roles/powervault_me4/tasks/ports.yml (+5 -0)
  12. control_plane/roles/powervault_me4/tasks/pv_me4_prereq.yml (+4 -4)
  13. control_plane/roles/powervault_me4/tasks/pv_validation.yml (+15 -3)
  14. control_plane/roles/powervault_me4/tasks/volume.yml (+76 -7)
  15. control_plane/roles/powervault_me4/vars/main.yml (+1 -2)
  16. control_plane/test/test_control_plane.yml (+709 -0)
  17. control_plane/test/test_control_plane_validation.yml (+271 -0)
  18. control_plane/test/test_eth_mtu.yml (+47 -0)
  19. control_plane/test/test_ethernet_config.yml (+346 -0)
  20. control_plane/test/test_ethernet_fact.yml (+157 -0)
  21. control_plane/test/test_ethernet_inventory (+5 -0)
  22. control_plane/test/test_vars/base_vars.yml (+150 -0)
  23. control_plane/test/test_vars/login_vars.yml (+81 -0)
  24. control_plane/test/test_vars/test_control_plane_vars.yml (+94 -0)
  25. control_plane/test/test_vars/test_ethernet_vars.yml (+56 -0)
  26. examples/host_mapping_file_one_touch.csv (+3 -0)
  27. examples/host_mapping_file_os_provisioning.csv (+3 -0)
  28. examples/mapping_device_file.csv (+2 -0)
  29. examples/mapping_file.csv (+0 -2)
  30. roles/cluster_validation/tasks/fetch_powervault_status.yml (+91 -0)
  31. roles/cluster_validation/tasks/main.yml (+63 -8)
  32. roles/cluster_validation/tasks/validations.yml (+10 -2)
  33. roles/cluster_validation/vars/main.yml (+13 -4)
  34. test/test_omnia_1.1.yml (+901 -0)
  35. test/test_omnia_validation.yml (+468 -0)
  36. test/test_vars/test_login_common_vars.yml (+28 -0)
  37. test/test_vars/test_login_node_vars.yml (+31 -0)
  38. test/test_vars/test_login_server_vars.yml (+25 -0)
  39. test/test_vars/test_nfs_node_vars.yml (+34 -0)
  40. test/test_vars/test_omnia_1.1_vars.yml (+50 -0)

+ 4 - 2
.all-contributorsrc

@@ -124,7 +124,8 @@
       "profile": "https://github.com/abhishek-s-a",
       "contributions": [
         "code",
-        "doc"
+        "doc",
+        "test"
       ]
     },
     {
@@ -162,7 +163,8 @@
       "avatar_url": "https://avatars.githubusercontent.com/u/72784834?v=4",
       "profile": "https://github.com/VishnupriyaKrish",
       "contributions": [
-        "code"
+        "code",
+        "test"
       ]
     },
     {

+ 3 - 3
.github/workflows/ansible-lint.yml

@@ -39,9 +39,9 @@ jobs:
         # [optional]
         # Arguments to override a package and its version to be set explicitly.
         # Must follow the example syntax.
-        #override-deps: |
-        #  ansible==2.9
-        #  ansible-lint==4.2.0
+        override-deps: |
+          ansible==2.10
+          ansible-lint==5.1.2
         # [optional]
         # Arguments to be passed to the ansible-lint
 

The file diff has been suppressed because it is too large
+ 2 - 2
README.md


+ 16 - 8
control_plane/input_params/powervault_me4_vars.yml

@@ -22,7 +22,7 @@ locale: "English"
 # Specify the system name to identify the system
 # By default it is set to "Uninitialized_Name"
 # Length should be less than 30 and it should not contain space.
-# This is "optional"
+# Optional
 powervault_me4_system_name: "Unintialized_Name"
 
 # Specify the snmp notification level
@@ -37,6 +37,11 @@ powervault_me4_system_name: "Unintialized_Name"
 # Compulsory
 powervault_me4_snmp_notify_level: "none"
 
+# The type of pool to be created on the powervault
+# It can be either linear or virtual.
+# Default: linear
+powervault_me4_pool_type: "linear"
+
 # Specify the required RAID Level
 # The different RAID levels and the min and max number of disks supported for each RAID are
 # r1/raid1: 2
@@ -53,24 +58,27 @@ powervault_me4_raid_levels: "raid1"
 # the enclosure number and disk range in the Enter Range of Disks text box. 
 # Use the format enclosure-number.disk-range,enclosure-number.disk-range. 
 # For example, to select disks 3-12 in enclosure 1 and 5-23 in enclosure 2, enter 1.3-12,2.5-23.
-# For ME4012 - 0.0-0.11,1.0-1.11 are the allowed values
+# For ME4012 - 0.0-0.11 are the allowed values
+# For RAID 10 disk range should be given in subgroups separated by colons with no spaces.
+# RAID-10 example:1.1-2:1.3-4:1.7,1.10
 # Default value is 0.1-2
 # Compulsory
 powervault_me4_disk_range: "0.1-2"
 
 # Specify the volume names
-# Cannot be left blank
 # the default value is "k8s_volume" and "slurm_volume"
 # Compulsory
 powervault_me4_k8s_volume_name: "k8s_volume"
 powervault_me4_slurm_volume_name: "slurm_volume"
 
 # Specify the disk group name
-# If left blank, system automatically assigns the name
+# Mandatory
 powervault_me4_disk_group_name: "omnia"
 
 # Specify the percentage for partition in disk
 # Default value is "60%"
+# Min: 5
+# Max: 90
 # Compulsory
 powervault_me4_disk_partition_size: "60"
 
@@ -79,12 +87,12 @@ powervault_me4_disk_partition_size: "60"
 # Compulsory
 powervault_me4_volume_size: "100GB"
 
-#Specify the pool for volume
+# Specify the pool for disk and volumes
 # Pool can either be  a/A or b/B.
-# Compulsory
+# Mandatory if powervault_me4_pool_type selected as "virtual".
 powervault_me4_pool: "a"
 
 # Specify the nic of the server with which Powervault is connected.
-# Default value is eno1.
+# Default value is em1.
 # Compulsory
-powervault_me4_server_nic: "eno1"
+powervault_me4_server_nic: "em1"
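
For reference, a minimal sketch of how these options might be combined for a virtual pool with RAID 10 (values are illustrative and not taken from this commit; the raid level string is an assumption based on the naming shown above):

powervault_me4_pool_type: "virtual"
powervault_me4_raid_levels: "raid10"                 # assumed value name for RAID 10
powervault_me4_disk_range: "1.1-2:1.3-4:1.7,1.10"    # RAID-10 subgroups separated by colons
powervault_me4_pool: "a"                             # mandatory when pool_type is "virtual"
powervault_me4_disk_partition_size: "60"             # must stay within 5-90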

+ 37 - 0
control_plane/roles/control_plane_common/tasks/count_component_roles.yml

@@ -0,0 +1,37 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# limitations under the License.
+---
+
+- name: Count of manager nodes defined
+  set_fact:
+    count_of_manager: "{{ count_of_manager| int + 1 }}"
+  when: item == group_name_manager
+  tags: install
+
+- name: Count of compute nodes defined
+  set_fact:
+    count_of_compute: "{{ count_of_compute| int + 1 }}"
+  when: item == group_name_compute
+  tags: install
+
+- name: Count of login nodes defined
+  set_fact:
+    count_of_login: "{{ count_of_login| int + 1 }}"
+  when: item == group_name_login
+  tags: install
+
+- name: Count of NFS nodes defined
+  set_fact:
+    count_of_nfs_node: "{{ count_of_nfs_node| int + 1 }}"
+  when: item == group_name_nfs
+  tags: install
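
A minimal sketch of how these counter tasks are driven (mirroring the include added in validate_host_mapping_file.yml later in this commit):

- name: Find count of each component role defined in mapping file
  include_tasks: count_component_roles.yml
  loop: "{{ list_of_roles }}"

# e.g. list_of_roles = [manager, compute, compute, login_node, nfs_node]
# leaves count_of_manager=1, count_of_compute=2, count_of_login=1, count_of_nfs_node=1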

+ 9 - 1
control_plane/roles/control_plane_common/tasks/main.yml

@@ -38,8 +38,16 @@
   import_tasks: fetch_sm_inputs.yml
   when: ib_switch_support
 
+- name: Host mapping file validation
+  import_tasks: validate_host_mapping_file.yml
+  when: host_mapping_file_path |length >0
+
+- name: Device mapping file validation
+  import_tasks: validate_device_mapping_file.yml
+  when: mngmnt_mapping_file_path |length >0
+
 - name: Encrypt idrac_tools_vars.yml
   import_tasks: encrypt_idrac_tools_vars.yml
 
 - name: NFS Server setup for offline repo and awx
-  import_tasks: nfs_server_setup.yml
+  import_tasks: nfs_server_setup.yml
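
Both imports are skipped when the corresponding path variable is empty; a hedged sketch of how the paths might be set (illustrative values pointing at the example files added in this commit, not taken from the actual input files):

host_mapping_file_path: "examples/host_mapping_file_one_touch.csv"
mngmnt_mapping_file_path: "examples/mapping_device_file.csv"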

+ 82 - 0
control_plane/roles/control_plane_common/tasks/validate_device_mapping_file.yml

@@ -0,0 +1,82 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# limitations under the License.
+---
+- name: Check that device mapping file exists at mentioned path
+  stat:
+    path: "{{ mngmnt_mapping_file_path }}"
+  register: stat_result
+  tags: install
+
+- name: Fail if config file doesn't exist
+  fail:
+    msg: "{{ fail_msg_mapping_file + mngmnt_mapping_file_path }}"
+  when: not stat_result.stat.exists
+  tags: install
+
+- name: Read device mapping file from CSV file and return a dictionary
+  read_csv:
+    path: "{{ mngmnt_mapping_file_path }}"
+    key: "{{ mapping_file_key }}"
+  register: device_mapping_file
+  delegate_to: localhost
+  tags: install
+
+- name: Check if header is present in mapping file
+  shell:  set -o pipefail && awk 'NR==1 { print $1}' "{{ mngmnt_mapping_file_path }}"
+  register: mngmnt_header
+  changed_when: false
+  tags: install
+
+- name: Fail if header not in correct format
+  fail:
+    msg: "{{ fail_device_mapping_file_header }}"
+  when: mngmnt_header.stdout !=  device_mapping_header_format
+  tags: install
+
+- name: Check if mapping file is comma seperated
+  shell: awk -F\, '{print NF-1}' "{{ mngmnt_mapping_file_path }}"
+  register: mngmnt_comma_seperated
+  changed_when: false
+  tags: install
+
+- name: Fail if not comma seperated or if all fields are not given
+  fail:
+    msg: "{{ fail_mapping_file_field_seperation }}"
+  when: not(item =="1")
+  with_items: "{{ mngmnt_comma_seperated.stdout_lines }}"
+  tags: install
+
+- name: Initialize count variables
+  set_fact:
+    list_of_ips: []
+    count_total_items: "{{ device_mapping_file.dict |length }}"
+  tags: install
+
+- name: Create list of IPs in mapping file
+  set_fact:
+    list_of_ips: "{{ [ item.value.IP ] + list_of_ips }}"
+  loop: "{{ device_mapping_file.dict | dict2items }}"
+  loop_control:
+    label: "{{ item.value.MAC }}"
+  tags: install
+
+- name: Find count of unique IPs
+  set_fact:
+    count_of_unique_ip : "{{ list_of_ips| unique| length }}"
+  tags: install
+
+- name: Validation to check if unique IPs are provided for each node
+  fail:
+    msg: "{{ fail_mapping_file_duplicate_ip + mngmnt_mapping_file_path }}"
+  when: not(count_of_unique_ip|int == count_total_items|int)
+  tags: install
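
As a worked example of the file these tasks validate (MAC and IP values are illustrative, not from the commit), the device mapping CSV carries the MAC,IP header and one unique IP per row:

MAC,IP
70:b5:e8:d1:19:b6,172.17.0.10
70:b5:e8:d1:19:b7,172.17.0.11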

+ 179 - 0
control_plane/roles/control_plane_common/tasks/validate_host_mapping_file.yml

@@ -0,0 +1,179 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# limitations under the License.
+---
+- name: Check that host mapping file exists at mentioned path
+  stat:
+    path: "{{ host_mapping_file_path }}"
+  register: stat_result
+  tags: install
+
+- name: Fail if config file doesn't exist
+  fail:
+    msg: "{{ fail_msg_mapping_file + host_mapping_file_path }}"
+  when: not stat_result.stat.exists
+  tags: install
+
+- name: Read host mapping file from CSV file and return a dictionary
+  read_csv:
+    path: "{{ host_mapping_file_path }}"
+    key: "{{ mapping_file_key }}"
+  register: mapping_file
+  delegate_to: localhost
+  tags: install
+
+- name: Initialize variable for role support in mapping file
+  set_fact:
+    component_role_support: false
+  tags: install
+
+- name: Check if header is present in mapping file
+  shell:  set -o pipefail && awk 'NR==1 { print $1}' "{{ host_mapping_file_path }}"
+  register: mngmnt_header
+  changed_when: false
+  tags: install
+
+- name: Fail if header not in correct format
+  assert:
+    that: (mngmnt_header.stdout ==  host_mapping_header_format) or (mngmnt_header.stdout == host_mapping_header_with_role_format)
+    fail_msg: "{{ fail_mapping_file_header }}"
+  tags: install
+
+- name: Check if mapping file is comma seperated
+  shell: awk -F\, '{print NF-1}' "{{ host_mapping_file_path }}"
+  register: mngmnt_comma_seperated
+  changed_when: false
+  tags: install
+
+- name: Set variable if component roles given in mapping file
+  set_fact:
+    component_role_support: true
+  when: mngmnt_header.stdout == host_mapping_header_with_role_format
+  tags: install
+
+- name: Fail if not comma seperated or if all fields are not given for MAC,Hostname,IP,Component_role
+  fail:
+    msg: "{{ fail_mapping_file_field_seperation }}"
+  when: not(item =="3") and not (item == "-1") and component_role_support
+  with_items: "{{ mngmnt_comma_seperated.stdout_lines }}"
+  tags: install
+
+- name: Fail if not comma seperated or if all fields are not given for MAC,Hostname,IP
+  fail:
+    msg: "{{ fail_mapping_file_field_seperation }}"
+  when: not(item =="2") and not (item == "-1") and not(component_role_support)
+  with_items: "{{ mngmnt_comma_seperated.stdout_lines }}"
+  tags: install
+
+- name: Initialize count variables
+  set_fact:
+    list_of_ips: []
+    list_of_roles: []
+    list_of_hostnames: []
+    count_of_manager: 0
+    count_of_compute: 0
+    count_of_nfs_node: 0
+    count_of_login: 0
+    count_total_items: "{{ mapping_file.dict |length }}"
+  tags: install
+
+- name: Create list of IPs and component roles and hostnames defined in mapping file
+  set_fact:
+    list_of_ips: "{{ [ item.value.IP ] + list_of_ips }}"
+    list_of_hostnames: "{{ [ item.value.Hostname ] + list_of_hostnames }}"
+  loop: "{{ mapping_file.dict | dict2items }}"
+  loop_control:
+    label: "{{ item.value.MAC }}"
+  tags: install
+
+- name: Create list of component roles defined in mapping file
+  set_fact:
+    list_of_roles: "{{ [ item.value.Component_role ] + list_of_roles }}"
+  loop: "{{ mapping_file.dict | dict2items }}"
+  loop_control:
+    label: "{{ item.value.MAC }}"
+  when: component_role_support
+  tags: install
+
+- name: Assert hostnames
+  assert:
+    that:
+      - '"_" not in item'
+      - '"." not in item'
+      - '" " not in item'
+    quiet: yes
+    fail_msg: "{{ fail_mapping_file_hostname_chars + item }}"
+  with_items: "{{ list_of_hostnames }}"
+  tags: install
+
+- name: Find count of unique IPs
+  set_fact:
+    count_of_unique_ip : "{{ list_of_ips| unique| length }}"
+  tags: install
+
+- name: Validation to check if unique IPs are provided for each node
+  fail:
+    msg: "{{ fail_mapping_file_duplicate_ip + host_mapping_file_path }}"
+  when: not(count_of_unique_ip|int == count_total_items|int)
+  tags: install
+
+- name: Find count of unique hostnames
+  set_fact:
+    count_of_unique_hostnames : "{{ list_of_hostnames | unique | length }}"
+  tags: install
+
+- name: Validation to check if unique hostnames are provided for each node
+  fail:
+    msg: "{{ fail_mapping_file_duplicate_hostname }}"
+  when: not(count_of_unique_hostnames|int == count_total_items| int)
+  tags: install
+
+- name: Find count of each component role defined in mapping file
+  include_tasks: count_component_roles.yml
+  loop: "{{ list_of_roles }}"
+  when: component_role_support
+  tags: install
+
+- block:
+  - name: Validation to check if component roles for each node is defined
+    fail:
+      msg: "{{ fail_mapping_file_roles_error }}"
+    when: not( count_total_items|int == (count_of_manager|int + count_of_compute|int + count_of_login|int + count_of_nfs_node|int))
+
+  - name: Validation to check number of manager nodes defined
+    fail:
+      msg: "{{ fail_mapping_file_manager_role }}"
+    when: not (count_of_manager | int  == 1)
+
+  - name: Validation to check number of compute nodes defined
+    fail:
+      msg: "{{ fail_mapping_file_compute_role }}"
+    when: count_of_compute|int  < 1
+
+  - name: Validation to check number of login nodes defined
+    fail:
+      msg: "{{ fail_mapping_file_login_role }}"
+    when: not ( count_of_login|int == 1)
+
+  - name: Validation to check number of nfs nodes defined
+    fail:
+      msg: "{{ fail_mapping_file_nfs_role }}"
+    when: powervault_support and not (count_of_nfs_node|int == 1)
+  tags: install
+
+  rescue:
+  - name: Count of roles defined
+    fail:
+      msg: "{{ count_of_roles_defined }}"
+    tags: install
+
+  when: component_role_support
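
A hedged example of a host mapping file that satisfies these checks (addresses and hostnames are illustrative): the header carries the optional Component_role column, hostnames avoid "_", "." and spaces, and the role counts meet the one-manager, one-login, one-nfs, at-least-one-compute rules:

MAC,Hostname,IP,Component_role
70:b5:e8:d1:19:b6,node001,172.17.0.10,manager
70:b5:e8:d1:19:b7,node002,172.17.0.11,compute
70:b5:e8:d1:19:b8,node003,172.17.0.12,login_node
70:b5:e8:d1:19:b9,node004,172.17.0.13,nfs_node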

+ 30 - 0
control_plane/roles/control_plane_common/vars/main.yml

@@ -140,3 +140,33 @@ nfs_services:
   - mountd
   - rpc-bind
   - nfs
+
+# Usage: validate_host_mapping_file.yml
+fail_msg_mapping_file: "Mapping file doesn't exist at given path: "
+mapping_file_key: "MAC"
+fail_mapping_file_header: "Header of csv file is not in correct format.
+                          It should be of the format: MAC,Hostname,IP,Component_role or MAC,Hostname,IP"
+host_mapping_header_format: "MAC,Hostname,IP"
+host_mapping_header_with_role_format: "MAC,Hostname,IP,Component_role"
+fail_mapping_file_field_seperation: "Failed: Mapping file should be comma separated and all fields must be filled."
+fail_mapping_file_duplicate_ip: "Failed: Duplicate ip exists. Please verify following mapping file again: "
+fail_mapping_file_duplicate_hostname: "Failed: Duplicate hostname exists. Please verify host mapping file again."
+fail_mapping_file_hostname_chars: "Hostname should not contain _ or . or space as it will cause error with slurm and K8s. Found in: "
+fail_mapping_file_roles_error: "Failed. Define correct Component Roles for each node.
+                                Component roles can only take values: {{ group_name_manager }}, {{group_name_compute}},
+                                 {{ group_name_login }}, {{ group_name_nfs }}"
+fail_mapping_file_manager_role: "Exactly 1 manager node must be defined"
+fail_mapping_file_compute_role: "Atleast 1 compute node must be defined"
+fail_mapping_file_login_role: "Exactly 1 login node must be defined"
+fail_mapping_file_nfs_role: "Exactly 1 nfs node must be defined"
+count_of_roles_defined: "Component Roles defined: Manager Node: {{ count_of_manager }},
+                        Compute Nodes: {{ count_of_compute }}, Login Node: {{ count_of_login }},
+                        Nfs Node: {{ count_of_nfs_node }}, Total Nodes: {{ count_total_items }} "
+group_name_manager: "manager"
+group_name_compute: "compute"
+group_name_login: "login_node"
+group_name_nfs: "nfs_node"
+
+# Usage: validate_device_mapping_file.yml
+fail_device_mapping_file_header: "Failed: Header (MAC,IP) should be present in the mapping file."
+device_mapping_header_format: "MAC,IP"

+ 14 - 6
control_plane/roles/powervault_me4/tasks/map_volume.yml

@@ -24,6 +24,7 @@
   changed_when: false
   no_log: true
   register: config_content
+  delegate_to: localhost
   run_once: true
 
 - name: Decrpyt login_vars.yml
@@ -32,6 +33,7 @@
     --vault-password-file {{ login_pv_vault_file }}
   changed_when: false
   run_once: true
+  delegate_to: localhost
   when: "'$ANSIBLE_VAULT;' in config_content.stdout"
 
 - name: Include variable file login_vars.yml
@@ -57,9 +59,18 @@
   delegate_to: localhost
   tags: install
 
+- name: Get map port
+  set_fact:
+    map_port: "{{ item.0 }}"
+  when: hostvars['pv']['map_ip'] == item.1
+  with_together:
+    - "{{ up_port }}"
+    - "{{ set_port_ip }}"
+  register: output
+
 - name: Map volume
   uri:
-    url: https://{{ groups['powervault_me4'][0] }}/api/map/volume/{{ powervault_me4_k8s_volume_name }}/access/{{ access }}/ports/{{ item.0 }}/lun/{{ lun1 }}/initiator/{{ hostvars['server_iqdn_id']['server_iqdn'] }}
+    url: https://{{ groups['powervault_me4'][0] }}/api/map/volume/{{ powervault_me4_k8s_volume_name }}/access/{{ access }}/ports/{{ map_port }}/lun/{{ lun1 }}/initiator/{{ hostvars['server_iqdn_id']['server_iqdn'] }}
     method: GET
     body_format: json
     validate_certs: no
@@ -67,14 +78,12 @@
     headers:
       {'sessionKey': "{{ map_session_key.json.status[0].response }}", 'datatype':'json'}
   register: map_vol1
-  with_together:
-    - "{{ up_port }}"
   delegate_to: localhost
   tags: install
 
 - name: Map volume
   uri:
-    url: https://{{ groups['powervault_me4'][0] }}/api/map/volume/{{ powervault_me4_slurm_volume_name }}/access/{{ access }}/ports/{{ item.0 }}/lun/{{ lun2 }}/initiator/{{ hostvars['server_iqdn_id']['server_iqdn']  }}
+    url: https://{{ groups['powervault_me4'][0] }}/api/map/volume/{{ powervault_me4_slurm_volume_name }}/access/{{ access }}/ports/{{ map_port }}/lun/{{ lun2 }}/initiator/{{ hostvars['server_iqdn_id']['server_iqdn']  }}
     method: GET
     body_format: json
     validate_certs: no
@@ -82,8 +91,6 @@
     headers:
       {'sessionKey': "{{ map_session_key.json.status[0].response }}", 'datatype':'json'}
   register: map_vol2
-  with_together:
-    - "{{ up_port }}"
   delegate_to: localhost
   tags: install
 
@@ -93,4 +100,5 @@
     --vault-password-file {{ login_pv_vault_file }}
   changed_when: false
   run_once: true
+  delegate_to: localhost
   when: "'$ANSIBLE_VAULT;' in config_content.stdout"
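
A short worked example of the new "Get map port" selection (values are assumed, not from the commit): with_together pairs up_port with set_port_ip element by element, and map_port takes the port whose IP equals hostvars['pv']['map_ip'].

# up_port:     ["a1", "a2"]
# set_port_ip: ["192.168.25.5", "192.168.25.6"]
# if hostvars['pv']['map_ip'] == "192.168.25.6", the when-clause matches the second
# pair, so map_port becomes "a2" and is used in the two Map volume tasks above.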

+ 5 - 0
control_plane/roles/powervault_me4/tasks/ports.yml

@@ -24,6 +24,7 @@
   changed_when: false
   no_log: true
   register: config_content
+  delegate_to: localhost
   run_once: true
 
 - name: Decrpyt login_vars.yml
@@ -32,6 +33,7 @@
     --vault-password-file {{ login_pv_vault_file }}
   changed_when: false
   run_once: true
+  delegate_to: localhost
   when: "'$ANSIBLE_VAULT;' in config_content.stdout"
 
 - name: Include variable file login_vars.yml
@@ -53,6 +55,7 @@
       {'datatype': 'json'}
     validate_certs: no
   register: port_session_key
+  delegate_to: localhost
   tags: install
 
 - name: Show ports
@@ -65,6 +68,7 @@
     headers:
       {'sessionKey': "{{ port_session_key.json.status[0].response }}", 'datatype':'json'}
   register: show_ports
+  delegate_to: localhost
   tags: install
 
 - name: Up ports
@@ -95,4 +99,5 @@
   with_together: 
     - "{{ set_port_ip }}"
     - "{{ up_port }}"
+  delegate_to: localhost
   tags: install

+ 4 - 4
control_plane/roles/powervault_me4/tasks/pv_me4_prereq.yml

@@ -57,14 +57,14 @@
 
 - name: Get the product id
   set_fact:
-    pv_id: system_info.json.system[0]['product-id']
+    pv_id: "{{ system_info.json.system[0]['product-id'] }}"
 
 - name: Verify the product id and model no. of device
   fail:
     msg: "{{ fail_pv_support }}"
   when:
-    - scsi_product_id in system_info.json.system[0]['scsi-product-id']
-    - pv_id  == "ME4084" or pv_id == "ME4024"  or pv_id == "ME4012"
+    - scsi_product_id not in system_info.json.system[0]['scsi-product-id']
+    - pv_id not in ["ME4084", "ME4024", "ME4012"]
 
 - name: Set system name
   uri:
@@ -76,5 +76,5 @@
     headers:
       {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
   register: system_name
-  when: powervault_me4_system_name != ""
+  when: powervault_me4_system_name
   tags: install

+ 15 - 3
control_plane/roles/powervault_me4/tasks/pv_validation.yml

@@ -33,6 +33,7 @@
 - name: Check if system name has space
   shell: echo {{ powervault_me4_system_name }} | grep '\s' -c
   register: space_count
+  changed_when: false
   ignore_errors: true
   tags: install
 
@@ -46,6 +47,7 @@
 - name: Check if volume name has space
   shell: echo {{ powervault_me4_k8s_volume_name }} | grep '\s' -c
   register: vol_count1
+  changed_when: false
   ignore_errors: true
   tags: install
 
@@ -59,6 +61,7 @@
 - name: Check if volume name has space
   shell: echo {{ powervault_me4_slurm_volume_name }} | grep '\s' -c
   register: vol_count2
+  changed_when: false
   ignore_errors: true
   tags: install
 
@@ -80,6 +83,7 @@
   assert:
     that:
       - disk_count.stdout == "0"
+      - powervault_me4_disk_group_name | length > 1
       - powervault_me4_disk_group_name | length < 30
     msg: "{{ system_name_wrong }}" 
 
@@ -89,7 +93,7 @@
       - powervault_me4_snmp_notify_level | length >1
       - powervault_me4_snmp_notify_level == "crit" or powervault_me4_snmp_notify_level == "error" or powervault_me4_snmp_notify_level == "warn" or powervault_me4_snmp_notify_level == "resolved" or powervault_me4_snmp_notify_level == "info" or powervault_me4_snmp_notify_level == "none"
     fail_msg: "{{ snmp_wrong_value }}"
-    success_msg: "{{ snmp_success }}" 
+    success_msg: "{{ snmp_success }}"
 
 - name: Assert RAID value
   assert:
@@ -113,12 +117,20 @@
     that: 
       - powervault_me4_pool == "a" or powervault_me4_pool == "A" or powervault_me4_pool == "b" or powervault_me4_pool == "B"
     msg: "{{ wrong_pool }}"
+  when: powervault_me4_pool_type == "virtual"
+
+- name: Check pool type
+  assert:
+    that:
+      - powervault_me4_pool_type | length > 1
+      - powervault_me4_pool_type | lower == "virtual" or powervault_me4_pool_type | lower == "linear"
+    msg: "{{ wrong_pool_type }}"
 
 - name: Check parition percentage
   assert:
     that:
       - powervault_me4_disk_partition_size|int
-      - powervault_me4_disk_partition_size|int < 99
+      - powervault_me4_disk_partition_size|int < 90
       - powervault_me4_disk_partition_size|int > 5
     msg: "{{ wrong_partition }}"
 
@@ -133,4 +145,4 @@
 - name: Assert the nic provided
   assert:
     that:
-      - powervault_me4_server_nic | length > 2
+      - powervault_me4_server_nic | length > 2

+ 76 - 7
control_plane/roles/powervault_me4/tasks/volume.yml

@@ -30,22 +30,45 @@
   register: vol_session_key
   tags: install
 
-- name: Add disk group
+- name: Add disk group in virtual pool
   uri:
-    url: https://{{ inventory_hostname }}/api/add/disk-group/type/{{ type }}/disks/{{ powervault_me4_disk_range }}/level/{{ powervault_me4_raid_levels }}/pool/{{ powervault_me4_pool }}/{{ powervault_me4_disk_group_name }}
+    url: https://{{ inventory_hostname }}/api/add/disk-group/type/{{ powervault_me4_pool_type }}/disks/{{ powervault_me4_disk_range }}/level/{{ powervault_me4_raid_levels }}/pool/{{ powervault_me4_pool }}/{{ powervault_me4_disk_group_name }}
     method: GET
     body_format: json
     validate_certs: no
     use_proxy: no
     headers:
       {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
-  register: pv_disk
+  register: pv_disk1
+  when: powervault_me4_pool_type|lower == "virtual"
+  tags: install
+
+- name: Add disk group in linear pool
+  uri:
+    url: https://{{ inventory_hostname }}/api/add/disk-group/type/{{ powervault_me4_pool_type }}/disks/{{ powervault_me4_disk_range }}/level/{{ powervault_me4_raid_levels }}/{{ powervault_me4_disk_group_name }}
+    method: GET
+    body_format: json
+    validate_certs: no
+    use_proxy: no
+    headers:
+      {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
+  register: pv_disk2
+  when: powervault_me4_pool_type|lower == "linear"
   tags: install
 
 - name: Assert if disk group created or not
   fail:
-    msg: "{{ pv_disk.json.status[0].response }}"
-  when:  pv_disk.json.status[0] ['response-type'] == "Error"
+    msg: "{{ pv_disk1.json.status[0].response }}"
+  when:
+    - powervault_me4_pool_type|lower == "virtual"
+    - pv_disk1.json.status[0] ['response-type'] == "Error"
+
+- name: Assert if disk group created or not
+  fail:
+    msg: "{{ pv_disk2.json.status[0].response }}"
+  when:
+    - powervault_me4_pool_type|lower == "linear"
+    - pv_disk2.json.status[0] ['response-type'] == "Error"
 
 - name: Create volume1
   uri:
@@ -57,6 +80,7 @@
     headers:
       {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
   register: pv_vol1
+  when: powervault_me4_pool_type|lower == "virtual"
   tags: install
 
 - name: Create volume2
@@ -69,14 +93,59 @@
     headers:
       {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
   register: pv_vol2
+  when: powervault_me4_pool_type|lower == "virtual"
+  tags: install
+
+- name: Create volume1
+  uri:
+    url: https://{{ inventory_hostname }}/api/create/volume/size/{{ powervault_me4_volume_size }}/pool/{{ powervault_me4_disk_group_name }}/{{ powervault_me4_k8s_volume_name }}
+    method: GET
+    body_format: json
+    validate_certs: no
+    use_proxy: no
+    headers:
+      {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
+  register: pv_vol3
+  when: powervault_me4_pool_type|lower == "linear"
+  tags: install
+
+- name: Create volume2
+  uri:
+    url: https://{{ inventory_hostname }}/api/create/volume/size/{{ powervault_me4_volume_size }}/pool/{{ powervault_me4_disk_group_name }}/{{ powervault_me4_slurm_volume_name }}
+    method: GET
+    body_format: json
+    validate_certs: no
+    use_proxy: no
+    headers:
+      {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
+  register: pv_vol4
+  when: powervault_me4_pool_type|lower == "linear"
   tags: install
 
 - name: Assert if k8s_volume created correctly
   fail:
     msg: "{{ pv_vol1.json.status[0].response }}"
-  when: pv_vol1.json.status[0]['response-type'] == "Error"
+  when:
+    - powervault_me4_pool_type| lower == "virtual"
+    - pv_vol1.json.status[0]['response-type'] == "Error"
 
 - name: Assert if slurm_volume created correctly
   fail:
     msg: "{{ pv_vol2.json.status[0].response }}"
-  when: pv_vol2.json.status[0]['response-type'] == "Error"
+  when:
+    - powervault_me4_pool_type| lower == "virtual"
+    - pv_vol2.json.status[0]['response-type'] == "Error"
+
+- name: Assert if k8s_volume created correctly
+  fail:
+    msg: "{{ pv_vol3.json.status[0].response }}"
+  when:
+    - powervault_me4_pool_type| lower == "linear"
+    - pv_vol3.json.status[0]['response-type'] == "Error"
+
+- name: Assert if slurm_volume created correctly
+  fail:
+    msg: "{{ pv_vol4.json.status[0].response }}"
+  when:
+    - powervault_me4_pool_type|lower == "linear"
+    - pv_vol4.json.status[0]['response-type'] == "Error"

+ 1 - 2
control_plane/roles/powervault_me4/vars/main.yml

@@ -23,7 +23,7 @@ correct_disk_range: "Succes: Disk range is correct"
 wrong_pool: "Failed: Given pool value is wrong"
 wrong_partition: "Failed: Given partition is wrong"
 wrong_vol_size: "Failed: Given volume size is wrong"
-
+wrong_pool_type: "Failed: Given pool type value is wrong"
 
 # Usage: pv_me4_prereq.yml
 scsi_product_id: ME4
@@ -37,7 +37,6 @@ base_pv_file: "{{ role_path }}/../../input_params/base_vars.yml"
 # Usage: volume.yml
 lun1: 0
 lun2: 1
-type: virtual
 
 # Usage: ports.yml
 port_ip: 192.168.25.

+ 709 - 0
control_plane/test/test_control_plane.yml

@@ -0,0 +1,709 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Testcase OMNIA_1.1_MS_TC_001
+# Test Case to validate the execution of control_plane.yml with valid inputs -- Default Test Case
+- name: OMNIA_1.1_MS_TC_001
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: VERIFY_OMNIA_01
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+             
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+          tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+          tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_002
+# Test Case to validate the execution of control_plane.yml with no input
+- name: OMNIA_1.1_MS_TC_002
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_002
+
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc02 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane_common role
+          include_role:
+            name: ../roles/control_plane_common
+          vars:
+            base_vars_filename: ../input_params/base_vars.yml
+      rescue:
+        - name: Validate error
+          assert:
+            that: input_base_failure_msg in ansible_failed_result.msg
+            success_msg: "{{ input_config_check_success_msg }}"
+            fail_msg: "{{ input_config_check_fail_msg }}"
+      tags: Execute_common_role
+    
+# Testcase OMNIA_1.1_MS_TC_003 and OMNIA_1.1_MS_TC_004
+# Test Case to validate the execution of control_plane.yml with NFS share already present
+- name: OMNIA_1.1_MS_TC_003
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_003,TC_004
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+    
+    - name: Creating new control_plane.yml
+      copy:
+        dest: "../test_control_plane.yml"
+        content: |
+         - name: Executing omnia roles
+           hosts: localhost
+           connection: local
+           roles:
+              - control_plane_common
+              - control_plane_repo
+        mode: '0644'
+      tags: Replace_control_plane
+      
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook test_control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+          tags: Execute_control_plane
+    
+    - block:
+        - name: Execute validation script
+          include_tasks: "{{ control_plane_validation_script_path }}" 
+          tags: Execute_Validation_Script
+      
+    - name: Delete newly created control_plane.yml
+      file:
+        state: absent
+        path: ../test_control_plane.yml
+      when: foo_stat.stat.exists
+      tags: Delete_test_control_plane
+
+# Testcase OMNIA_1.1_MS_TC_005
+# Test Case to validate the execution of control_plane.yml after a successful run and validate k8s pods along with services after reboot.
+- name: OMNIA_1.1_MS_TC_005
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_005
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+      
+    - name: Check uptime
+      command: uptime -p
+      register: system_uptime
+      changed_when: false
+      tags: Check_Uptime
+      
+    - name: Extracting data from system_uptime
+      set_fact:
+        uptime_number: "{{ system_uptime.stdout.split()[1] }}"
+        uptime_min: "{{ system_uptime.stdout.split()[2] }}"
+    
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      when: uptime_number|int > 15
+      tags: Replace_input
+      
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+          when: uptime_number|int > 15
+          tags: Execute_control_plane
+          
+        - name: Reboot system
+          command: reboot
+          when: uptime_number|int > 15
+          tags: Reboot_System
+    
+    - block:
+        - name: Wait for 30sec for kubectl to get things ready
+          pause:
+            seconds: 200
+          when: (uptime_number| int <= 15) and (uptime_min == "minutes")
+
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+          when: (uptime_number| int <= 15) and (uptime_min == "minutes")
+          tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_006 and OMNIA_1.1_MS_TC_007
+# Test Case to validate the execution of control_plane.yml and after a successful run the user deletes/stops all pods
+- name: OMNIA_1.1_MS_TC_006
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_006,TC_007
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: "0644"
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+      
+    - name: Delete all containers
+      command: kubectl delete --all namespaces
+      changed_when: false
+      tags: Delete_Pods
+      
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+      
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_008
+# Test Case to validate the execution of control_plane.yml with infiniband=false, powervault=true and ethernet=true
+- name: OMNIA_1.1_MS_TC_008
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_008
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - name: Editing base_vars.yml for powervault to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ powervault_false }}"
+        replace: "{{ powervault_true }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for ethernet to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ ethernet_false }}"
+        replace: "{{ ethernet_true }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for infiniband to false
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ infiniband_true }}"
+        replace: "{{ infiniband_false }}"
+      tags: Edit_base_vars
+        
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_009
+# Test Case to validate the execution of control_plane.yml with infiniband=true, powervault=false and ethernet=true
+- name: OMNIA_1.1_MS_TC_009
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_009
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - name: Editing base_vars.yml for powervault to false
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ powervault_true }}"
+        replace: "{{ powervault_false }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for ethernet to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ ethernet_false }}"
+        replace: "{{ ethernet_true }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for infiniband to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ infiniband_false }}"
+        replace: "{{ infiniband_true }}"
+      tags: Edit_base_vars
+        
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_010
+# Test Case to validate the execution of control_plane.yml with infiniband=true, powervault=true and ethernet=false
+- name: OMNIA_1.1_MS_TC_010
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_010
+
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - name: Editing base_vars.yml for powervault to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ powervault_false }}"
+        replace: "{{ powervault_true }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for ethernet to false
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ ethernet_true }}"
+        replace: "{{ ethernet_false }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for infiniband to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ infiniband_false }}"
+        replace: "{{ infiniband_true }}"
+      tags: Edit_base_vars
+        
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_011
+# Test Case to validate the execution of control_plane.yml with firmware update set to False
+- name: OMNIA_1.1_MS_TC_011
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_011
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+      
+    - name: Set firmware update to false
+      replace:
+        path: ../input_params/idrac_vars.yml
+        regexp: "{{ fw_update_true }}"
+        replace: "{{ fw_update_false }}"
+      tags: Set_FW_Update
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+      
+    - name: Check if firmware updates folder exists
+      stat:
+        path: /var/nfs_repo/dellupdates
+      register: fw_update_dir
+      tags: Set_FW_Update
+      
+    - name: Verify firmware updates were downloaded
+      assert:
+        that:
+          - not fw_update_dir.stat.exists
+        success_msg: "{{ fw_success_validation }}"
+        fail_msg: "{{ fw_fail_validation }}"
+      tags: Set_FW_Update
+        
+# Testcase OMNIA_1.1_MS_TC_012
+# Test Case to validate the execution of control_plane.yml with firmware update set to true
+- name: OMNIA_1.1_MS_TC_012
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_012
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+      
+    - name: Set firmware update to true
+      replace:
+        path: ../input_params/idrac_vars.yml
+        regexp: "{{ fw_update_false }}"
+        replace: "{{ fw_update_true }}"
+      tags: Set_FW_Update
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+      
+    - name: Check if firmware updates folder exists
+      stat:
+        path: /var/nfs_repo/dellupdates
+      register: fw_update_dir
+      tags: Set_FW_Update
+      
+    - name: Verify firmware updates were downloaded
+      assert:
+        that:
+          - fw_update_dir.stat.exists
+        success_msg: "{{ fw_success_validation }}"
+        fail_msg: "{{ fw_fail_validation }}"
+      tags: Set_FW_Update
+
+# Testcase OMNIA_1.1_MS_TC_013
+# Test Case to validate the execution of control_plane.yml with docker login credential
+- name: OMNIA_1.1_MS_TC_013
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml  
+   
+  gather_subset:
+    - 'min'
+  tags: TC_013
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+    
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+      
+    - name: Change docker params in omnia_config.yml
+      replace:
+        path: ../../omnia_config.yml
+        regexp: "docker_username: .*$"
+        replace: 'docker_username: "{{ docker_user }}"'
+      tags: Set_Docker_Creds
+    
+    - name: Assert if the credentials are valid in test_control_plane_vars.yml
+      assert:
+        that:
+          - 'docker_user != "User"'
+          - 'docker_password != "Password"'
+        success_msg: "{{ valid_docker_creds }}"
+        fail_msg: "{{ invalid_docker_creds }}"
+      tags: Set_Docker_Creds
+    
+    - name: Change docker params in omnia_config.yml
+      replace:
+        path: ../../omnia_config.yml
+        regexp: "docker_password: .*$"
+        replace: 'docker_password: "{{ docker_password }}"'
+      tags: Set_Docker_Creds
+    
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+          tags: Execute_control_plane
+    
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+          tags: Execute_Validation_Script
+      
+    - name: Fetch docker info
+      shell: docker login & sleep 3
+      register: new
+      changed_when: false
+      tags: Set_Docker_Creds
+
+    - name: Assert that docker was used to pull images 
+      assert:
+        that:
+          - "'Login did not succeed' in new.stderr"
+        success_msg: "{{ docker_success_validation }}"
+        fail_msg: "{{ docker_fail_validation }}"
+      tags: Set_Docker_Creds

+ 271 - 0
control_plane/test/test_control_plane_validation.yml

@@ -0,0 +1,271 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---  
+
+- block:
+
+    - name: Fetch Package info
+      package_facts:
+        manager: auto
+      
+    - name: Verify all packages are installed
+      assert:
+        that: "'{{ item }}' in ansible_facts.packages"
+        success_msg: "{{ install_package_success_msg }}"
+        fail_msg: "{{ install_package_fail_msg }}"
+      when: "'python-docker' not in item"
+      with_items: "{{ common_packages }}"
+      ignore_errors: true
+      
+    - name: Check login_vars is encrypted
+      command: cat {{ login_vars_filename }}
+      changed_when: false
+      register: config_content
+       
+    - name: Validate login file is encypted or not
+      assert:
+        that: "'$ANSIBLE_VAULT;' in config_content.stdout"
+        fail_msg: "{{ login_vars_fail_msg }}"
+        success_msg: "{{ login_vars_success_msg }}"
+            
+#  Installing a required package : JQ      
+    - name: Installing jq (JSON Query)
+      package:
+        name: "{{ test_package }}"
+        state: present
+           
+#  Checking if all the required pods are working
+    - name: Get pods info
+      shell: kubectl get pods --all-namespaces
+      register: all_pods_info
+          
+    - name: Check the count of pods
+      set_fact:
+         count: "{{ all_pods_info.stdout_lines|length - 1 }}"
+          
+    - name: Check if all the pods are running
+      assert:
+        that:
+          - "'Running' in all_pods_info.stdout_lines[{{ item }}]"
+        fail_msg: "{{ check_pods_fail_msg }}"
+        success_msg: "{{ check_pods_success_msg }}"
+      with_sequence: start=1 end={{ count }}
+      
+#  Checking if NFS Server is running and Custom ISO is created
+    - name: Get NFS Stat
+      shell: systemctl status nfs-idmapd
+      register: nfstat_info
+       
+    - name: Verify NFS Stat is running
+      assert:
+        that:
+          - "'Active: active (running)' in nfstat_info.stdout"
+        success_msg: "{{ nfs_share_success_msg }}"
+        fail_msg: "{{ nfs_share_fail_msg }}"
+        
+    - name: Check nfs mount point
+      stat:
+        path: "{{ nfs_mount_Path }}"
+      register: nfs_mount_info
+          
+    - name: Verify nfs share is mounted
+      assert:
+        that:
+          - "{{ nfs_mount_info.stat.exists }}"
+        success_msg: "{{ nfs_mount_success_msg }}"
+        fail_msg: "{{ nfs_mount_fail_msg }}"
+           
+    - name: Check Custom ISO
+      stat:
+        path: "{{ check_iso_path }}"
+      register: check_iso_info
+          
+    - name: Verify Custom ISO is created in the NFS repo
+      assert:
+        that:
+          - "{{ check_iso_info.stat.exists }}"
+        success_msg: "{{ check_iso_success_msg }}"
+        fail_msg: "{{ check_iso_fail_msg }}"
+      
+#  Checking if network-config container is running
+    
+    - name: Get Pod info for network-config
+      shell: |
+         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "network-config" and .labels."io.kubernetes.container.name" == "mngmnt-network-container") | "\(.id) \(.metadata.name) \(.state)"'
+      register: network_config_pod_info
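+    # Note: the jq filter above is expected to emit one quoted line per matching container,
+    # e.g. (illustrative): "3f9c1a2b4d5e mngmnt-network-container CONTAINER_RUNNING",
+    # so the assert below only needs to find container_info ("CONTAINER_RUNNING") in it.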
+          
+    - name: Get Pod Status for network-config
+      assert:
+        that:
+          - network_config_pod_info.stdout_lines | regex_search( "{{ container_info }}")
+        success_msg: "{{ network_config_pod_success_msg }}"
+        fail_msg: "{{ network_config_pod_fail_msg }}"
+         
+    - name: Get Pod facts
+      shell: |
+            crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "network-config" and .labels."io.kubernetes.container.name" == "mngmnt-network-container") | "\(.id)"'
+      register: network_config_pod_fact
+         
+    - name: Parse container id for the pods
+      set_fact: 
+        container_id: "{{ network_config_pod_fact.stdout[1:-1] }}"   
+          
+    - name: Check dhcpd,xinetd service is running
+      command: crictl exec {{ container_id }} systemctl is-active {{ item }}
+      changed_when: false
+      ignore_errors: yes
+      register: pod_service_check
+      with_items:
+        - dhcpd
+        - xinetd
+            
+    - name: Verify dhcpd, xinetd service is running
+      assert:
+        that:
+          - "'active' in pod_service_check.results[{{ item }}].stdout"
+          - "'inactive' not in pod_service_check.results[{{ item }}].stdout"
+          - "'unknown' not in pod_service_check.results[{{ item }}].stdout"
+        fail_msg: "{{ pod_service_check_fail_msg }}"
+        success_msg: "{{ pod_service_check_success_msg }}"
+      with_sequence: start=0 end={{ pod_service_check.results|length - 1 }}
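+    # For a manual spot check, the equivalent command (using the container id parsed above)
+    # would be, for example: crictl exec <container_id> systemctl is-active dhcpd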
+         
+# Checking if cobbler-container is running
+    - name: Get Pod info for cobbler
+      shell: |
+         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "cobbler") | "\(.id) \(.metadata.name) \(.state)"'
+      register: network_config_pod_info
+      
+    - name: Get Pod Status for cobbler
+      assert:
+        that:
+          - network_config_pod_info.stdout_lines | regex_search( "{{ container_info }}")
+        success_msg: "{{ cobbler_pod_success_msg }}"
+        fail_msg: "{{ cobbler_pod_fail_msg }}"
+      
+    - name: Get Pod facts for cobbler
+      shell: |
+            crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "cobbler") | "\(.id)"'
+      register: network_config_pod_fact
+      
+    - name: Extract cobbler pod id
+      set_fact: 
+        cobbler_id: "{{ network_config_pod_fact.stdout[1:-1] }}"   
+      
+    - name: Check tftp,dhcpd,xinetd,cobblerd service is running
+      command: crictl exec {{ cobbler_id }} systemctl is-active {{ item }}
+      changed_when: false
+      ignore_errors: yes
+      register: pod_service_check
+      with_items:
+        - dhcpd
+        - tftp
+        - xinetd
+        - cobblerd
+        
+    - name: Verify tftp,dhcpd,xinetd,cobblerd service is running
+      assert:
+        that:
+          - "'active' in pod_service_check.results[{{  item  }}].stdout"
+          - "'inactive' not in pod_service_check.results[{{  item  }}].stdout"
+          - "'unknown' not in pod_service_check.results[{{  item  }}].stdout"
+        fail_msg: "{{pod_service_check_fail_msg}}"
+        success_msg: "{{pod_service_check_success_msg}}"
+      with_sequence: start=0 end=3
+
+# Checking Cron-Jobs
+    - name: Check crontab list
+      command: crictl exec {{ cobbler_id }} crontab -l
+      changed_when: false
+      register: crontab_list
+
+    - name: Verify crontab list
+      assert:
+        that:
+          - "'* * * * * /usr/bin/ansible-playbook /root/tftp.yml' in crontab_list.stdout"
+          - "'*/5 * * * * /usr/bin/ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"
+        fail_msg: "{{cron_jobs_fail_msg}}"
+        success_msg: "{{cron_jobs_success_msg}}"
+
+#  Checking subnet-manager pod is running and opensm is running
+#  Comment out this section if infiniband is not connected
+    - name: Fetch subnet-manager stats
+      shell: kubectl get pods -n subnet-manager 
+      register: sm_manager_info
+
+    - name: Verify subnet_manager container is running
+      assert:
+        that:
+          - "'Running' in sm_manager_info.stdout_lines[1]"
+        fail_msg: "{{subnet_manager_fail_msg}}"
+        success_msg: "{{subnet_manager_success_msg}}"
+
+# Checking awx pod is running
+
+    - name: Get Pod info for awx
+      shell: |
+         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "awx") | "\(.id) \(.metadata.name) \(.state)"'
+      register: awx_config_pod_info
+           
+    - name: Get Pod Status for awx
+      assert:
+        that:
+          - awx_config_pod_info.stdout_lines[{{ item }}] | regex_search( "{{ container_info }}")
+        success_msg: "{{ awx_pod_success_msg }}"
+        fail_msg: "{{ awx_pod_fail_msg }}"
+      ignore_errors: yes
+      with_sequence: start=0 end={{ awx_config_pod_info.stdout_lines |length - 1 }}
+          
+    - name: Get pvc stats
+      shell: |
+          kubectl get pvc -n awx -o json |jq '.items[] | "\(.status.phase)"'
+      register: pvc_stats_info
+            
+    - name: Verify if pvc stats is running
+      assert:
+        that:
+          - "'Bound' in pvc_stats_info.stdout"
+        fail_msg: "{{ pvc_stat_fail_msg }}"
+        success_msg: "{{ pvc_stat_success_msg }}"
+      with_sequence: start=0 end={{ pvc_stats_info.stdout_lines |length|int - 1 }}
+            
+    - name: Get svc stats
+      shell: kubectl get svc -n awx awx-service -o json
+      register: svc_stats_info
+           
+    - name: Verify if svc is up and running
+      assert:
+        that:
+          - "'Error from server (NotFound):' not in svc_stats_info.stdout"
+        success_msg: "{{ svc_stat_success_msg }}"
+        fail_msg: "{{ svc_stat_fail_msg }}"
+             
+    - name: Fetch Cluster IP from svc
+      shell: |
+          kubectl get svc -n awx -o json | jq '.items[] | select(.metadata.name == "awx-service") | "\(.spec.clusterIP)"'
+      register: cluster_ip_info
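+    # jq returns the cluster IP wrapped in double quotes, so stdout[1:-1] below strips them
+    # before the IP is used to build the URL.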
+           
+    - name: Check if connection to svc Cluster IP is enabled
+      uri:
+        url: http://{{ cluster_ip_info.stdout[1:-1] }}
+        follow_redirects: none
+        method: GET
+      ignore_errors: yes
+      register: cluster_ip_conn
+           
+    - name: Verify connection to svc cluster is working
+      assert:
+        that:
+          - cluster_ip_conn.status == 200
+        success_msg: "{{ svc_conn_success_msg }} : {{ cluster_ip_info.stdout[1:-1] }}"
+        fail_msg: "{{ svc_conn_fail_msg }} : {{ cluster_ip_info.stdout[1:-1] }}"

+ 47 - 0
control_plane/test/test_eth_mtu.yml

@@ -0,0 +1,47 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+- name: Get running config and reload PS
+  hosts: ethernet
+  connection: network_cli
+  gather_facts: no
+  collections:
+   - dellemc.os10
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+  tasks:
+   - name: Set facts
+     set_fact:
+       ansible_ssh_user: "{{ username }}"
+       ansible_ssh_pass: "{{ password }}"
+     tags: mtu,reload
+
+   - name: View running configurations
+     dellos10_command:
+       commands: show interface ethernet {{ validation_port }}
+     register: var1
+     tags: mtu
+
+   - name: Print config
+     debug:
+       msg: "{{ var1 }}"
+     tags: mtu
+
+   - name: Reload switch
+     dellos10_command:
+       commands: 
+          - command: 'reload'
+            prompt: '\[confirm yes/no\]:?$'
+            answer: 'yes'
+     tags: reload

+ 346 - 0
control_plane/test/test_ethernet_config.yml

@@ -0,0 +1,346 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+---
+# Testcase OMNIA_1.1_EF_TC_007
+# Execute ethernet.yml with both valid Global and interface configs in ethernet_config.yml
+- name: OMNIA_1.1_EF_TC_007
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_007
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Back up of ethernet_config.yml
+      copy:
+        src: "{{ ethernet_config_dir }}"
+        dest: "{{ ethernet_config_backup_dir }}"
+        mode: "{{ file_perm }}"
+      tags: TC_007
+
+    - name: Executing ethernet role with default ethernet_config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"      
+
+    - block:
+       - name: Validate default flow
+         assert:
+           that:
+             - ethernet_success_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+    - name: Set MTU of port {{ port_num }}
+      lineinfile:
+       dest: "{{ ethernet_config_dir }}"
+       insertbefore: "{{ search_line }}"
+       line: "{{ add_mtu_line }}"
+    
+    - name: Executing ethernet role with default ethernet_config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      
+    - name: Getting MTU of ethernet {{ validation_port }}
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'mtu'
+      changed_when: false
+      register: mtu_out
+      tags: TC_007,TC_002
+       
+    - name: Validate role exec output pre and post MTU addition
+      assert:
+        that:          
+          - validate_mtu_line in mtu_out.stdout
+        success_msg: "{{ success_message }}"
+        fail_msg: "{{ fail_case }}"
+      changed_when: false
+      failed_when: false
+      tags: TC_007
+
+# Testcase OMNIA_1.1_EF_TC_005
+# Execute ethernet.yml with save_config set to False
+- name: OMNIA_1.1_EF_TC_005
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_005
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Reload switch
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'reload'
+      changed_when: false
+    
+    - name: Pausing for switch to come up
+      pause:
+        minutes: "{{ time_to_pause }}"
+        
+    - name: Getting MTU of ethernet {{ validation_port }}
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'mtu'
+      changed_when: false
+      register: mtu_out
+      
+    - block:
+       - name: Validate that MTU is changed
+         assert:
+           that:
+             - validate_mtu_line not in mtu_out.stdout
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+         failed_when: false
+                
+# Testcase OMNIA_1.1_EF_TC_006
+# Execute ethernet.yml with save_config set to True
+- name: OMNIA_1.1_EF_TC_006
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_006
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Set save_changes_to_startup to True in ethernet_vars
+      ansible.builtin.replace:
+        dest: "{{ ethernet_config_dir }}"
+        regexp: 'save_changes_to_startup: false'
+        replace: 'save_changes_to_startup: True'
+        
+    - name: Execute network_ethernet role as port 4 has mtu set in ethernet_vars
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      
+    - name: Reload switch
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'reload'
+      changed_when: false
+    
+    - name: Pausing for switch to come up
+      pause:
+        minutes: "{{ time_to_pause }}"
+        
+    - name: Getting MTU of ethernet {{ validation_port }}
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'mtu'
+      changed_when: false
+      register: mtu_out
+    
+    - block:
+       - name: Validate that MTU is changed
+         assert:
+           that:
+             - validate_mtu_line in mtu_out.stdout
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+# Testcase OMNIA_1.1_EF_TC_010
+# Execute ethernet.yml with invalid Global and correct interface configs in ethernet_config.yml
+- name: OMNIA_1.1_EF_TC_010
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_010
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Making interface config invalid
+      lineinfile:
+        path: "{{ ethernet_config_dir }}"
+        insertafter: 'os10_config:'
+        line: 'gibberish inserted'
+      tags: TC_007
+
+    - name: Executing ethernet role with invalid global config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+
+    - block:
+       - name: Validate role exec output
+         assert:
+           that:
+             - ethernet_fail_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+
+# Testcase OMNIA_1.1_EF_TC_009
+# Validation of ethernet default configuration
+- name: OMNIA_1.1_EF_TC_009
+  hosts: ethernet
+  gather_facts: false
+  tags: VERIFY_OMNIA_01
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml    
+  tasks:
+    - name: Executing ethernet role
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}" 
+    
+    - block:
+       - name: Validate default flow
+         assert:
+           that:
+             - ethernet_success_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+         
+# Testcase OMNIA_1.1_EF_TC_011
+# Execute ethernet.yml with valid Global  and incorrect interface configs in ethernet_config.yml 
+- name: OMNIA_1.1_EF_TC_011
+  hosts: ethernet
+  gather_facts: false
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Making interface config invalid
+      lineinfile:
+        path: "{{ ethernet_config_dir }}"
+        insertafter: 'os10_interface:'
+        line: 'gibberish inserted'
+        
+    - name: Executing ethernet role with invalid interface config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+
+    - block:
+       - name: Validate role exec output
+         assert:
+           that:
+             - ethernet_fail_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"      
+
+
+# Testcase OMNIA_1.1_EF_TC_008
+# Execute ethernet.yml with only Global and no interface configs in ethernet_config.yml 
+- name: OMNIA_1.1_EF_TC_008
+  hosts: ethernet
+  gather_facts: false
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Retrieving ethernet_config backup
+      copy:
+        src: "{{ ethernet_config_backup_dir }}"
+        dest: "{{ ethernet_config_dir }}"
+        mode: "{{ file_perm }}"
+      tags: TC_008
+    
+    - name: Removing interface config from ethernet_config
+      ansible.builtin.command: sed -i '22,117d' "{{ ethernet_config_dir }}"
+      args:
+       warn: no
+      changed_when: false
+      tags: TC_008
+      
+    - name: Executing ethernet role with no interface config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      
+    - block:
+       - name: Validate default flow
+         assert:
+           that:
+             - ethernet_success_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+    - name: Restoring original ethernet_config
+      copy:
+        src: "{{ ethernet_config_backup_dir }}"
+        dest: "{{ ethernet_config_dir }}"
+        mode: "{{ file_perm }}"
+      tags: TC_008
+      
+    - name: Set save_changes_to_startup to True in ethernet_vars
+      ansible.builtin.replace:
+        dest: "{{ ethernet_config_dir }}"
+        regexp: 'save_changes_to_startup: false'
+        replace: 'save_changes_to_startup: True'
+        
+    - name: Execute network_ethernet role as port 4 has mtu set in ethernet_vars
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      
+    - name: Set save_changes_to_startup to False in ethernet_vars
+      ansible.builtin.replace:
+        dest: "{{ ethernet_config_dir }}"
+        regexp: 'save_changes_to_startup: True'
+        replace: 'save_changes_to_startup: False'

+ 157 - 0
control_plane/test/test_ethernet_fact.yml

@@ -0,0 +1,157 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+---
+# Testcase OMNIA_1.1_EF_TC_002
+# Execute ethernetfacts.yml with valid IP with valid credentials in ethernet inventory group
+- name: OMNIA_1.1_EF_TC_002
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_002
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Execute ethernet_facts with valid creds and valid IP
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ fact_template_value }}"
+       job_template_name: "{{ fact_job_name }}"
+       playbook_path: "{{ eth_facts_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      tags: TC_002
+   
+    - block:
+       - name: Validate default flow with valid IP and valid credentials
+         assert:
+           that:
+             - "'successful' in job_status.status"
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+# Testcase OMNIA_1.1_EF_TC_003
+# Execute ethernetfacts.yml with Invalid IP in ethernet inventory group
+- name: OMNIA_1.1_EF_TC_003
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_003
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Setting invalid IP
+      set_fact:
+        eth_host_name: "{{ random_ip }}"
+         
+    - name: Execute ethernet_facts with random IP
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ fact_template_value }}"
+       job_template_name: "{{ fact_job_name }}"
+       playbook_path: "{{ eth_facts_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+
+    - block:
+        - name: Validate invalid IP and valid credentials
+          assert:
+            that:
+              - "'failed' in job_status.status"
+            success_msg: "{{ success_message }}"
+            fail_msg: "{{ fail_case }}"
+          changed_when: false
+
+# Testcase OMNIA_1.1_EF_TC_001
+# Execute ethernetfacts.yml with no hosts in ethernet inventory group
+- name: OMNIA_1.1_EF_TC_001
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_001
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Execute ethernet_facts with no host details
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"       
+       template_name: "{{ fact_template_value }}"
+       job_template_name: "{{ fact_job_name }}"
+       playbook_path: "{{ eth_facts_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+    
+    - block:
+       - name: Validate no hosts and valid credentials
+         assert:
+           that:
+             - "'successful' in job_status.status"
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+# Testcase OMNIA_1.1_EF_TC_004
+# Execute ethernetfacts.yml with valid IP in ethernet inventory group with incorrect credentials
+- name: OMNIA_1.1_EF_TC_004
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_004
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Making ethernet_credentials invalid
+      tower_credential:
+        name: "ethernet_credential"
+        credential_type: "Machine"
+        inputs:
+          username: "{{ invalid_username }}"
+       
+    - name: Execute ethernet_facts with invalid credentials
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ fact_template_value }}"
+       job_template_name: "{{ fact_job_name }}"
+       playbook_path: "{{ eth_facts_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+     
+    - block:
+       - name: Validate valid IP and invalid credentials
+         assert:
+           that:
+             - "'failed' in job_status.status"
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+         
+    - name: Set credentials back to default
+      tower_credential:
+        name: "ethernet_credential"
+        credential_type: "Machine"
+        inputs:
+          username: "{{ username }}"
+          password: "{{ password }}"

+ 5 - 0
control_plane/test/test_ethernet_inventory

@@ -0,0 +1,5 @@
+[ethernet]
+1.2.3.4
+
+[ethernet:vars]
+ansible_network_os=dellemc.os10.os10

+ 150 - 0
control_plane/test/test_vars/base_vars.yml

@@ -0,0 +1,150 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Path to directory hosting ansible config file (ansible.cfg file)
+# Default value is /etc/ansible
+# This directory is on the host running ansible, if ansible is installed using dnf
+# If ansible is installed using pip, this path should be set
+ansible_conf_file_path: /etc/ansible
+
+# This variable is used to enable ethernet switch configuration
+# It accepts boolean values "true" or "false". 
+# By default its value is "false".
+# If ethernet switch support is needed set this to "true"
+ethernet_switch_support: true
+
+# This variable is used to enable infiniband switch configuration
+# It accepts boolean values "true" or "false". 
+# By default its value is "false".
+# If infiniband configuration is needed set this to "true"
+ib_switch_support: true
+
+# This variable is used to enable powervault configuration
+# It accepts boolean values "true" or "false". 
+# By default its value is "false".
+# If powervault configuration is needed set this to "true"
+powervault_support: false
+
+# The nic/ethernet card that will be connected to the public internet.
+# Default value of nic is eno2
+public_nic: "eno2"
+
+# Kubernetes pod network CIDR for appliance k8s network
+# Make sure this value does not overlap with any of the host networks.
+# Default value is "192.168.0.0/16"
+appliance_k8s_pod_net_cidr: "192.168.0.0/16"
+
+### Usage: provision_idrac, network_ib, network_ethernet, powervault_me4 ###
+
+# The trap destination IP address is the IP address of the SNMP Server where the trap will be sent
+# If this variable is left blank, it means SNMP will be disabled
+# Provide a valid SNMP server IP
+snmp_trap_destination: ""
+
+# Provide the snmp community name needed
+# By default this is set to "public"
+snmp_community_name: "public"
+
+### Usage: webui_awx ###
+
+# Organization name that is created in AWX.
+# The default value is “DellEMC”
+awx_organization: "DellEMC"
+
+### Usage: provision_cobbler, provision_idrac ###
+
+# This variable is used to set node provisioning method
+# It accepts values: idrac, pxe
+# Default value is "idrac"
+# If provisioning needs to be done through cobbler, set it to "pxe"
+# If idrac license is not present, provisioning mode will be set to "pxe"
+provision_method: "idrac"
+
+# This is the timezone that will be set during provisioning of OS
+# Available timezones are provided in control_plane/common/files/timezone.txt
+# Default timezone will be "GMT"
+# Some of the other available timezones are EST,CET,MST,CST6CDT,PST8PDT
+timezone: "GMT"
+
+# This is the language that will be set during provisioning of the OS
+# Default language supported is "en-US"
+language: "en-US"
+
+# This is the path where the user has to place the iso image that needs to be provisioned in target nodes.
+# The iso file should be CentOS7-2009-minimal edition.
+# Other iso files are not supported.
+# Mandatory value required
+iso_file_path: "/root/CentOS-7-x86_64-Minimal-2009.iso"
+
+# Default lease time that will be used by dhcp
+# Its unit is seconds
+# Min: 21600 seconds
+# Default: 86400 seconds
+# Max: 31536000 seconds
+# Mandatory value required
+default_lease_time: "86400"
+
+### Usage: control_plane_device ###
+
+# The nic/ethernet card that needs to be connected to provision 
+# the fabric, idrac and powervault.
+# This nic will be configured by Omnia for the DHCP server.
+# Default value of nic is eno1
+mngmnt_network_nic: "eno1"
+
+# The dhcp range for assigning the IPv4 address
+# Example: 172.17.0.1
+# Mandatory value required
+mngmnt_network_dhcp_start_range: "172.19.0.101"
+mngmnt_network_dhcp_end_range: "172.19.0.200"
+
+# The mapping file consists of the MAC address and its respective IP address.
+# The format of the mapping file should be MAC,IP and it must be a CSV file.
+# Eg: xx:yy:zz:aa:bb,172.17.0.5
+# A template for the mapping file exists in omnia/examples and is named mapping_device_file.csv.
+# This depicts the path where user has kept the mapping file for DHCP configurations.
+mngmnt_mapping_file_path: ""
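+# Example (illustrative path, assuming the repository is cloned under /root):
+# mngmnt_mapping_file_path: "/root/omnia/examples/mapping_device_file.csv"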
+
+### Usage: provision_cobbler ###
+
+# The nic/ethernet card that needs to be connected to provision the OS of bare metal servers
+# This nic will be configured by Omnia for the DHCP server.
+# Default value of nic is eno3
+host_network_nic: "eno3"
+
+# The dhcp range for assigning the IPv4 address
+# Example: 172.17.0.1
+# Mandatory value required
+host_network_dhcp_start_range: "172.17.0.101"
+host_network_dhcp_end_range: "172.17.0.200"
+
+# The mapping file consists of the MAC address and its respective Hostname and IP address.
+# The format of the mapping file should be MAC,Hostname,IP,Component_role (Component_role is optional) and it must be a CSV file.
+# Eg: xx:yy:zz:aa:bb,server,172.17.0.5,Group(if any)
+# Templates for the mapping file exist in omnia/examples as host_mapping_file_os_provisioning.csv and host_mapping_file_one_touch.csv.
+# This depicts the path where user has kept the mapping file for DHCP configurations.
+host_mapping_file_path: ""
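+# Example (illustrative path, assuming the repository is cloned under /root):
+# host_mapping_file_path: "/root/omnia/examples/host_mapping_file_os_provisioning.csv"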
+
+### Usage: control_plane_ib ###
+
+# The nic/ethernet card that needs to be connected to configure infiniband switch
+# This nic will be configured by Omnia for the DHCP server.
+# Default value of nic is ib0
+ib_network_nic: "ib0"
+
+# The dhcp range for assigning the IPv4 address
+# Example: 172.17.0.1
+ib_network_dhcp_start_range: "172.25.0.101"
+ib_network_dhcp_end_range: "172.25.0.200"

+ 81 - 0
control_plane/test/test_vars/login_vars.yml

@@ -0,0 +1,81 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+### Usage: provision_cobbler, provison_idrac ###
+
+# Password used while deploying OS on bare metal servers.
+# The Length of the password should be at least 8.
+# The password must not contain -,\, ',"
+# Mandatory value required
+provision_password: "test@123"
+
+### Usage: provision_cobbler ###
+
+# Password used for cobbler
+# The Length of the password should be at least 8.
+# The password must not contain -,\, ',"
+# Mandatory value required
+cobbler_password: "test@123"
+
+### Usage: provision_idrac ###
+
+# The username for idrac
+# The username must not contain -,\, ',"
+# Mandatory value required
+idrac_username: "root"
+
+# Password used for idrac
+# The password must not contain -,\, ',"
+# Mandatory value required
+idrac_password: "calvin"
+
+### Usage: webui_awx ###
+
+# Password used for awx UI
+# The Length of the password should be at least 8.
+# The password must not contain -,\, ',"
+#awx_password: ""
+
+### Usage: network_ethernet ###
+
+# The username for ethernet switch
+# The username must not contain -,\, ',"
+ethernet_switch_username: "admin"
+
+# Password used for ethernet switch
+# The password must not contain -,\, ',"
+ethernet_switch_password: "admin"
+
+### Usage: network_ib ###
+
+# The username for infiniband switch
+# The username must not contain -,\, ',"
+ib_username: "admin"
+
+# Password used for infiniband switch
+# The password must not contain -,\, ',"
+ib_password: "admin"
+
+### Usage: powervault_me4 ###
+
+# The username for powervault_me4
+# The username must not contain -,\, ',"
+powervault_me4_username: "manage"
+
+# Password used for powervault_me4
+# The password should have at least one uppercase character, one lowercase character,
+# one numeric character and one non-alphanumeric character.
+# The password must not contain -,\, ',", . , < , comma(,)
+powervault_me4_password: "Test@123"

+ 94 - 0
control_plane/test/test_vars/test_control_plane_vars.yml

@@ -0,0 +1,94 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+#usage: test_control_plane_validation.yml
+
+port_no: 22
+os_name: CentOS
+os_version: '8.4'
+internet_status: "Failed. No Internet connection. Make sure network is up."
+check_os_success_msg: "OS and Version are supported"
+check_os_fail_msg: "Unsupported OS or OS version. OS should be {{ os_name }} and Version should be {{ os_version }} or more"
+
+input_params_folder: "../input_params/"
+control_plane_dir: "../"
+control_plane_validation_script_path: test_control_plane_validation.yml
+
+input_files_tc01:
+  - "test_vars/base_vars.yml"
+  - "test_vars/login_vars.yml"
+
+input_files_tc02:
+  - "test_vars/login_vars.yml"
+
+input_config_check_success_msg: "control_plane.yml validation passed"
+input_config_check_fail_msg: "control_plane.yml validation failed"
+
+install_package_success_msg: "{{item}} is installed"
+install_package_fail_msg: "{{item}} is not installed"
+login_vars_filename: "../input_params/login_vars.yml"
+login_vars_fail_msg: "Login vars is not encrypted"
+login_vars_success_msg: "Login vars is encrypted"
+
+fw_update_false: "firmware_update_required: false"
+fw_update_true: "firmware_update_required: true"
+fw_success_validation: "Validation Success for firmware update"
+fw_fail_validation: "Validation Failed for firmware update"
+docker_success_validation: "Docker Validated successfully"
+docker_fail_validation: "Docker not validated"
+
+test_package: 'jq'
+check_pods_success_msg: "Pod is running"
+check_pods_fail_msg: "Pod is not running"
+nfs_share_success_msg: "NFS Server is running"
+nfs_share_fail_msg: "NFS Server is not running"
+
+nfs_mount_Path: "/var/nfs_repo"
+nfs_mount_success_msg: "NFS repo is mounted"
+nfs_mount_fail_msg: "NFS repo is not mounted"
+check_iso_path: '/var/nfs_repo/unattended_centos7.iso'
+check_iso_success_msg: "ISO is present in the NFS repo"
+check_iso_fail_msg: "ISO is not present in the NFS repo"
+
+pod_service_check_fail_msg: "Service is not running"
+pod_service_check_success_msg: "Service is up and running"
+network_config_pod_success_msg: "Network-Config Pod is running"
+network_config_pod_fail_msg: "Network-Config Pod is not running"
+awx_pod_success_msg: "awx pod is up and running."
+awx_pod_fail_msg: "awx pod is not running"
+pvc_stat_success_msg: "pvc stat is running"
+pvc_stat_fail_msg: "pvc stat is not running"
+svc_stat_success_msg: "svc stat is running"
+svc_stat_fail_msg: "svc stat is not running"
+svc_conn_success_msg: "Connection to svc is successful at"
+svc_conn_fail_msg: "Connection to svc failed at: "
+cobbler_pod_success_msg: "Cobbler service is running"
+cobbler_pod_fail_msg: "Cobbler service is not running"
+subnet_manager_success_msg: "Subnet Manager is running"
+subnet_manager_fail_msg: "Subnet Manager is not running"
+cron_jobs_success_msg: "Cron jobs are running"
+cron_jobs_fail_msg: "Cron jobs are not running"
+container_info: "CONTAINER_RUNNING"
+ethernet_true: "ethernet_switch_support: true"
+ethernet_false: "ethernet_switch_support: false"
+powervault_true: "powervault_support: true"
+powervault_false: "powervault_support: false"
+infiniband_true: "ib_switch_support: true"
+infiniband_false: "ib_switch_support: false"
+# Update the docker credentials below before running the tests
+docker_user: "User"
+docker_password: "Password"
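+# Example with placeholder values only (replace with valid Docker Hub credentials):
+# docker_user: "dockerhubuser01"
+# docker_password: "Examp1e@Pass"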
+valid_docker_creds: "Credentials are valid"
+invalid_docker_creds: "Please input valid docker username and password in test_control_plane_vars.yml"

+ 56 - 0
control_plane/test/test_vars/test_ethernet_vars.yml

@@ -0,0 +1,56 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+---
+# Usage : test_ethernet_facts.yml
+failed_msg: "Unexpected scenario"
+success_message: "Execution successful"
+eth_inventory_name: "ethernet_inventory"
+eth_host_name: "100.96.23.241"
+fact_template_value: "ethernet_template"
+fact_job_name: "ethernet_template"
+eth_facts_playbook_path: "control_plane/tools/ethernet_facts.yml"
+awx_script_path: "test_prepare.yml"
+random_ip: 100.100.100.100
+invalid_username: "invalid_username"
+username: admin
+password: admin
+
+# Usage : test_ethernet_config.yml
+ethernet_dir: "ethernet.yml"
+ethernet_config_dir: "../input_params/ethernet_vars.yml"
+ethernet_config_backup_dir: "ethernet_config_backup.yml"
+get_mtu_dir: "test_eth_mtu.yml"
+appliance_dir: "/root/ethernet/control_plane"
+fail_case: "Expected error, please check the configurations"
+sed_condition: '/Port 4/a mtu2345'
+eth_template_value: "ethernet_template"
+eth_job_name: "ethernet_template"
+eth_playbook_path: "control_plane/ethernet.yml"
+inventory_dir: "test_ethernet_inventory"
+login_vars_path: "../input_params/login_vars.yml"
+login_vars_vault_path: "../input_params/.login_vault_key"
+tower_config_file_path: "../roles/webui_awx/files/.tower_cli.cfg"
+tower_vault_file_path: "../roles/webui_awx/files/.tower_vault_key"
+file_perm: '0644'
+
+# Usage : test_eth_mtu.yml, test_ethernet_config.yml
+validation_port: 1/1/4:1
+port_num: 4
+search_line: "    ethernet 1/1/5:"
+add_mtu_line: "      mtu: 2345"
+time_to_pause: 4
+validate_mtu_line: "MTU 2345 bytes"
+ethernet_success_msg: "successful"
+ethernet_fail_msg: "failed"

+ 3 - 0
examples/host_mapping_file_one_touch.csv

@@ -0,0 +1,3 @@
+MAC,Hostname,IP,Component_role
+xx:yy:zz:aa:bb,server,1.2.3.4,manager
+aa:bb:cc:dd:ee,server2,10.10.11.12,nfs_node

+ 3 - 0
examples/host_mapping_file_os_provisioning.csv

@@ -0,0 +1,3 @@
+MAC,Hostname,IP
+xx:yy:zz:aa:bb,server,1.2.3.4
+aa:bb:cc:dd:ee,server2,10.10.11.12

+ 2 - 0
examples/mapping_device_file.csv

@@ -0,0 +1,2 @@
+MAC,IP
+xx:yy:zz:aa:bb,1.2.3.4

+ 0 - 2
examples/mapping_file.csv

@@ -1,2 +0,0 @@
-MAC,Hostname,IP
-xx:yy:zz:aa:bb,server,1.2.3.4

+ 91 - 0
roles/cluster_validation/tasks/fetch_powervault_status.yml

@@ -0,0 +1,91 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Check tower_cli.cfg is encrypted
+  command: cat "{{ tower_config_path }}"
+  changed_when: false
+  register: awx_content
+  run_once: true
+  no_log: true
+
+- name: Decrypt tower_cli.cfg
+  command: ansible-vault decrypt "{{ tower_config_path }}" --vault-password-file "{{ tower_vault_path }}"
+  changed_when: false
+  run_once: true
+  when: "'$ANSIBLE_VAULT;' in awx_content.stdout"
+
+- name: Fetch awx host
+  command: grep "host:" "{{ tower_config_path }}"
+  register: fetch_awx_host
+  changed_when: false
+  run_once: true
+
+- name: Fetch awx username
+  command: grep "username:" "{{ tower_config_path }}"
+  register: fetch_awx_username
+  changed_when: false
+  run_once: true
+  no_log: true
+
+- name: Fetch awx password
+  command: grep "password:" "{{ tower_config_path }}"
+  register: fetch_awx_password
+  changed_when: false
+  run_once: true
+  no_log: true
+
+- name: Set awx variables
+  set_fact:
+    awx_host: "{{ fetch_awx_host.stdout | regex_replace('host: ','') }}"
+    awx_username: "{{ fetch_awx_username.stdout | regex_replace('username: ','') }}"
+    awx_password: "{{ fetch_awx_password.stdout | regex_replace('password: ','') }}"
+  no_log: true
+
+- name: Encrypt tower_cli.cfg
+  command: ansible-vault encrypt "{{ tower_config_path }}" --vault-password-file "{{ tower_vault_path }}"
+  changed_when: false
+  run_once: true
+  when: "'$ANSIBLE_VAULT;' in awx_content.stdout"
+
+- name: Get inventory list
+  command: >-
+    awx --conf.host "{{ awx_host }}" --conf.username "{{ awx_username }}" --conf.password "{{ awx_password }}"
+    inventory list -f human --filter "name"
+  register: inventory_list
+  run_once: true
+  changed_when: false
+  no_log: true
+
+- block:
+    - name: Fetch powervault_me4_inventory
+      command: >-
+        awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+        hosts list --inventory "{{ powervault_inventory_name }}" -f human --filter "name"
+      register: fetch_inventory
+      run_once: true
+      changed_when: false
+      no_log: true
+
+    - name: Set powervault_status
+      set_fact:
+        powervault_status: true
+      when: fetch_inventory.stdout_lines[2:] | length > 0
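+    # "awx hosts list -f human" prints a header row and a separator row first, which is
+    # why stdout_lines[2:] is treated as the actual host entries in the check above.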
+
+    - name: Create powervault_me4 group
+      add_host:
+        name: "{{ item | regex_replace(' ','') }}"
+        groups: "{{ powervault_group }}"
+      when: powervault_status
+      with_items: "{{ fetch_inventory.stdout_lines[2:] }}"

+ 63 - 8
roles/cluster_validation/tasks/main.yml

@@ -1,4 +1,4 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 ---
+
 - name: Perform validations
   include_tasks: validations.yml
 
@@ -20,18 +21,72 @@
 
 - name: Check if omnia is running from AWX
   block:
-    - name: Appliance status
+    - name: Initialize variables
       set_fact:
-        appliance_status: false
+        control_plane_status: false
+        powervault_status: false
+        nfs_node_status: false
 
     - name: Check AWX instance
-      command: awx-manage --version
+      command: awx --version
+      changed_when: false
+      failed_when: false
+      register: awx_version_check
+
+    - name: Check AWX hostname
+      command: hostname
+      changed_when: false
+      register: awx_hostname
 
-    - name: Update appliance status
+    - name: Set control_plane_status
       set_fact:
-        appliance_status: true
+        control_plane_status: true
+      when:
+        - not awx_version_check.failed
+        - '"awx-" in awx_hostname.stdout'
+
+    - name: Set NFS node status
+      set_fact:
+        nfs_node_status: true
+      when:
+        - control_plane_status
+        - groups['nfs_node'] | length == 1
+
+    - name: Fetch powervault status
+      include_tasks: fetch_powervault_status.yml
+      when: nfs_node_status
 
-  rescue:
+- name: omnia.yml running on host
+  block:
     - name: Passwordless SSH status
       debug:
-        msg: "omnia.yml running on host"
+        msg: "omnia.yml running on host"
+
+    - name: Check whether ansible config file exists
+      stat:
+        path: "{{ ansible_conf_file_path }}/ansible.cfg"
+      register: ansible_conf_exists
+
+    - name: Create the directory if it does not exist
+      file:
+        path: "{{ ansible_conf_file_path }}"
+        state: directory
+        mode: "{{ file_perm }}"
+      when: not ansible_conf_exists.stat.exists
+
+    - name: Create ansible config file if it does not exist
+      copy:
+        dest: "{{ ansible_conf_file_path }}/ansible.cfg"
+        mode: "{{ file_perm }}"
+        content: |
+          [defaults]
+          log_path = /var/log/omnia.log
+      when: not ansible_conf_exists.stat.exists
+
+    - name: Set omnia.log file
+      replace:
+        path: "{{ ansible_conf_file_path }}/ansible.cfg"
+        regexp: '#log_path = /var/log/ansible.log'
+        replace: 'log_path = /var/log/omnia.log'
+      when: ansible_conf_exists.stat.exists
+  when: not control_plane_status

+ 10 - 2
roles/cluster_validation/tasks/validations.yml

@@ -1,4 +1,4 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 ---
+
 - name: Validate skip tags
   fail:
     msg: "{{ skip_tag_fail_msg }}"
@@ -27,4 +28,11 @@
   assert:
     that: "groups['compute'] | length | int >= 1"
     fail_msg: "{{ compute_group_fail_msg }}"
-    success_msg: "{{ compute_group_success_msg }}"
+    success_msg: "{{ compute_group_success_msg }}"
+
+- name: NFS group to contain exactly 1 node
+  assert:
+    that: "groups['nfs_node'] | length | int == 1"
+    fail_msg: "{{ nfs_node_group_fail_msg }}"
+    success_msg: "{{ nfs_node_group_success_msg }}"
+  when: groups['nfs_node']
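+
+# A valid inventory for the check above defines exactly one host in the group, e.g.:
+#   [nfs_node]
+#   10.5.0.105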

+ 13 - 4
roles/cluster_validation/vars/main.yml

@@ -1,4 +1,4 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -12,7 +12,8 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 ---
-#Usage: fetch_password.yml
+
+# Usage: fetch_password.yml
 config_filename: "omnia_config.yml"
 config_vaultname: .omnia_vault_key
 min_length: 8
@@ -39,7 +40,7 @@ input_config_failure_msg: "Input parameters cannot be empty"
 login_node_required_success_msg: "Login_node_required successfully validated"
 login_node_required_fail_msg: "Failed. login_node_required can be either true or false"
 
-#Usage: validations.yml
+# Usage: validations.yml
 skip_tag_fail_msg: "Can't skip both slurm and kubernetes"
 manager_group_fail_msg: "manager group should contain exactly 1 node"
 manager_group_success_msg: "manager group check passed"
@@ -48,4 +49,12 @@ compute_group_success_msg: "compute group check passed"
 disjoint_fail_msg: "manager and compute groups should be disjoint"
 disjoint_success_msg: "manager and compute groups are disjoint"
 login_node_group_fail_msg: "Login node group should contain atleast 1 node when login_node_required is true"
-login_node_group_success_msg: "Login node group check passed when login_node_required is true"
+login_node_group_success_msg: "Login node group check passed when login_node_required is true"
+nfs_node_group_fail_msg: "nfs_node group should contain exactly 1 node"
+nfs_node_group_success_msg: "nfs_node group check passed"
+
+# Usage: fetch_powervault_status.yml
+tower_config_path: "{{ playbook_dir }}/control_plane/roles/webui_awx/files/.tower_cli.cfg"
+tower_vault_path: "{{ playbook_dir }}/control_plane/roles/webui_awx/files/.tower_vault_key"
+powervault_inventory_name: "powervault_me4_inventory"
+powervault_group: "powervault_me4"

+ 901 - 0
test/test_omnia_1.1.yml

@@ -0,0 +1,901 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+#Testcase OMNIA_1.1_US_CRD_TC_001
+# Execute omnia.yml with separate servers for manager,compute,login,nfs node with default parameters
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: localhost
+
+  tasks:
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_001
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i ../inventory
+      changed_when: false
+      tags: TC_001
+
+#Testcase OMNIA_1.1_US_CRD_TC_005
+# Execute omnia.yml with addition of new compute node
+- name: OMNIA_1.1_US_CRD_TC_005
+  hosts: localhost
+
+  tasks:
+  
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_005
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i ../inventory
+      changed_when: false
+      tags: TC_005
+
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_005
+
+    - name: Creating test inventory file 
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+        
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }}          
+          {{ host5 }}
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+      tags: TC_005
+
+    - name: Verify if new compute node is added
+      command: ansible --list-hosts compute -i test_inventory.yml
+      changed_when: false
+      register: compute_info
+      tags: TC_005
+
+    - name: Validate compute node
+      assert:
+         that: 
+           - "'{{ host5 }}' in compute_info.stdout"
+         success_msg: "{{ compute_node_success_msg }}"
+         fail_msg: "{{ compute_node_fail_msg }}"
+      tags: TC_005
+
+#Testcase OMNIA_1.1_US_CRD_TC_006
+# Execute omnia.yml after removal of new compute node
+- name: OMNIA_1.1_US_CRD_TC_006
+  hosts: localhost
+
+  tasks:
+
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_006
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i test_inventory.yml
+      changed_when: false
+      tags: TC_006
+
+    - name: Delete one compute node
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }} 
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+      tags: TC_006
+       
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_006
+
+    - name: Verify if one compute node is deleted
+      command:  ansible --list-hosts compute -i test_inventory.yml
+      register: compute_info
+      changed_when: false
+      tags: TC_006
+
+    - name: Validate compute node
+      assert:
+         that: 
+           - "'{{ host5 }}' not in compute_info.stdout"
+         success_msg: "{{ compute_node_del_success_msg }}"
+         fail_msg: "{{ compute_node_del_fail_msg }}"
+      tags: TC_006
+
+    - name: Delete the inventory file
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_006
+
+#Testcase OMNIA_1.1_US_CRD_TC_008
+# Execute Jupyterhub.yml and then Kubeflow.yml
+- name: OMNIA_1.1_US_CRD_TC_008
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_008
+
+    - name: Install Jupyterhub
+      command: ansible-playbook platforms/jupyterhub.yml -i inventory
+      changed_when: false
+      args:
+        chdir: ../
+      tags: TC_008
+
+    - name: Install Kubeflow
+      command: ansible-playbook platforms/kubeflow.yml -i inventory
+      changed_when: false
+      args:
+        chdir: ../
+      tags: TC_008
+
+- name: OMNIA_1.1_US_CRD_TC_008
+  hosts: manager
+  vars_files:
+    - test_vars/test_jupyterhub_vars.yml
+    - test_vars/test_kubeflow_vars.yml
+
+  tasks:
+    - name: Waiting for the pods deployment
+      pause:
+        minutes: 5
+      tags: TC_008
+      
+    - name: Checking K8s services
+      command: kubectl get services
+      register: k8s_services
+      changed_when: false
+      failed_when: false
+      tags: TC_008
+
+    - name: Validating JupyterHub services
+      assert:
+        that:
+          - "'hub' in k8s_services.stdout"
+          - "'proxy-public' in k8s_services.stdout"
+          - "'proxy-api' in k8s_services.stdout"
+        fail_msg: "{{ jupyterhub_services_fail_msg }}"
+        success_msg: "{{ jupyterhub_services_success_msg }}"
+      tags: TC_008
+    
+    - name: Checking all running pods under jupyterhub namespace
+      command: kubectl get pods --namespace jupyterhub --field-selector=status.phase=Running
+      register: jupyterhub_running_pods
+      changed_when: false
+      failed_when: false
+      tags: TC_008
+
+    - name: Validating JupyterHub pods
+      assert:
+        that:
+          - "'hub' in default_jupyterhub_pods.stdout"
+          - "'proxy' in default_jupyterhub_pods.stdout"
+        fail_msg: "{{ jupyterhub_pods_fail_msg }}"
+        success_msg: "{{ jupyterhub_pods_success_msg }}"
+      tags: TC_008
+
+    - name: Checking installed Kubeflow version
+      command: kfctl version
+      register: kfctl_version
+      changed_when: false
+      failed_when: false
+      tags: TC_008
+
+    - name: Checking pods under kubeflow namespace
+      command: kubectl get pods --namespace kubeflow
+      register: kubeflow_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_008
+
+    - name: Checking pods under istio-system namespace
+      command: kubectl get pods --namespace istio-system
+      register: istio_system_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_008
+
+    - name: Validating Kubeflow Installation
+      assert:
+        that:
+          - "'command not found' not in kfctl_version.stdout"
+        fail_msg: "{{ kubeflow_install_fail_msg }}"
+        success_msg: "{{ kubeflow_install_success_msg }}"
+      tags: TC_008
+
+    - name: Validating Kubeflow pods deployment
+      assert:
+        that:
+          - "'Running' in kubeflow_pods.stdout or 'ContainerCreating' in kubeflow_pods.stdout"
+          - "'Running' in istio_system_pods.stdout or 'ContainerCreating' in istio_system_pods.stdout"
+        fail_msg: "{{ kubeflow_pods_deployment_fail_msg }}"
+        success_msg: "{{ kubeflow_pods_deployment_success_msg }}"
+      tags: TC_008
+
+# Testcase OMNIA_1.1_US_CRD_TC_009
+# Execute omnia.yml and reboot all the nodes
+- name: OMNIA_1.1_US_CRD_TC_009
+  hosts: localhost
+  vars_files:
+    - test_vars/test_k8s_common_vars.yml
+    - test_vars/test_slurm_common_vars.yml
+
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_009
+
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_009
+
+
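+    # The ad-hoc reboot uses -B 1 -P 0 (fire-and-forget background mode) so the
+    # task is not marked failed when the SSH connection drops during the reboot.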
+    - name: Reboot the nodes
+      command: ansible all -i ../inventory -b -B 1 -P 0 -m shell -a "sleep {{ sleep_time }} && reboot"
+      changed_when: false
+      tags: TC_009
+
+    - name: Waiting for services to restart
+      pause:
+         minutes: "{{ pod_time }}"
+      tags: TC_009
+
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i ../inventory
+      changed_when: false
+      tags: TC_009
+
+
+# Testcase OMNIA_1.1_US_CRD_TC_002
+# Execute omnia.yml in a single node scenario (manager, compute and login node on the same server) with an invalid mariadb_password
+- name: OMNIA_1.1_US_CRD_TC_002
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_002
+
+    - name: Creating test inventory file for single node scenario
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host1 }}  
+          
+          [login_node]
+          {{ host1 }}
+           
+          [nfs_node]
+      tags: TC_002
+
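+    # If a previous omnia.yml run left omnia_config.yml vault-encrypted, decrypt it
+    # so the replace tasks below can edit its plain-text values.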
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_002
+
+    - name: Decrypt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_002
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_one }}"'
+      tags: TC_002
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_invalid }}"'
+      tags: TC_002
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: true'
+      tags: TC_002
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_002
+      
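+    # This run is expected to stop at input validation because db_passwd_invalid
+    # violates the mariadb_password format rules; the assert that follows checks
+    # that the corresponding error message was raised.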
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      failed_when: false
+      changed_when: false
+      register: db_error
+      args: 
+        chdir: ../
+      tags: TC_002
+      
+    - name: Validate mariadb password error
+      assert:
+        that:
+          - '" mariadb_password not given in correct format" not in db_error.stdout'
+        fail_msg: "{{ mariadb_password_error_fail_msg }}"
+        success_msg: "{{ mariadb_password_error_success_msg }}"
+      tags: TC_002
+
+    - name: Delete the inventory file
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_002
+
+# Testcase OMNIA_1.1_US_CRD_TC_003
+# Execute omnia.yml in a single node scenario (manager, compute and login node on the same server) with an invalid mariadb_password
+- name: OMNIA_1.1_US_CRD_TC_003
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_003
+
+    - name: Creating inventory file for single node scenario
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host1 }}  
+          
+          [login_node]
+          {{ host1 }}
+
+          [nfs_node]
+      tags: TC_003
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_003
+
+    - name: Decrypt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_003
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_one }}"'
+      tags: TC_003
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_invalid }}"'
+      tags: TC_003
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: true'
+      tags: TC_003
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_003
+      
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      failed_when: false
+      changed_when: false
+      register: db_error
+      args: 
+        chdir: ../
+      tags: TC_003
+      
+    - name: Validate mariadb password error
+      assert:
+        that:
+          - '" mariadb_password not given in correct format" not in db_error.stdout'
+        fail_msg: "{{ mariadb_password_error_fail_msg }}"
+        success_msg: "{{ mariadb_password_error_success_msg }}"
+      tags: TC_003
+
+    - name: Delete the inventory file
+      changed_when: false
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_003
+
+# Testcase OMNIA_1.1_US_CRD_TC_004
+# Execute omnia.yml with separate servers for manager, compute and login node on a newer kubernetes version
+- name: OMNIA_1.1_US_CRD_TC_004
+  hosts: localhost
+
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_004
+
+    - name: Creating test inventory
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }}  
+          
+          [login_node]
+          {{ host3 }}
+         
+          [nfs_node]
+
+      tags: TC_004
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_004
+
+    - name: Decrypt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_two }}"'
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_complex }}"'
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: true'
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_default }}"'
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_version: ""'
+        replace: 'k8s_version: "{{ k8s_new_version }}"'
+      tags: TC_004
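+
+    # k8s_version is switched to k8s_new_version so this scenario deploys the
+    # cluster on the newer supported Kubernetes release.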
+
+    
+    - name: Execute omnia.yml 
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_004
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i test_inventory.yml
+      changed_when: false
+      tags: TC_004
+
+    - name: Delete the inventory file
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_004
+
+# Testcase OMNIA_1.1_US_CRD_TC_007
+# Execute omnia.yml after redeploying the cluster
+- name: OMNIA_1.1_US_CRD_TC_007
+  hosts: localhost
+
+  tasks:
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_007
+
+    - name: Re-execute omnia.yml
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_007
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i ../inventory
+      changed_when: false
+      tags: TC_007
+
+# Testcase OMNIA_1.1_US_CRD_TC_010
+# Execute omnia.yml with the same server for manager and compute, installing slurm first and kubernetes later
+- name: OMNIA_1.1_US_CRD_TC_010
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_010
+
+    - name: Creating test inventory file
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host1 }}  
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+      tags: TC_010
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_010
+
+    - name: Decrypt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_one }}"'
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_default }}"'
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: true'
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'ipa_admin_password: ""'
+        replace: 'ipa_admin_password: "{{ ipa_passwd_default }}"' 
+      tags: TC_010
+      
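+    # First pass installs only the slurm stack (kubernetes skipped); the second
+    # run below layers kubernetes on top while skipping slurm and freeipa.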
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags kubernetes
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_010
+
+    - name: Re-execute omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags slurm,freeipa
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_010
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i test_inventory.yml
+      changed_when: false
+      tags: TC_010
+
+
+
+# Testcase OMNIA_1.1_US_CRD_TC_011
+# Execute omnia.yml with separate servers for manager, compute, login and nfs node, installing slurm first and kubernetes later
+- name: OMNIA_1.1_US_CRD_TC_011
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_011
+
+    - name: Creating test inventory file
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }}  
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+          {{ host4 }}
+      tags: TC_011
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_011
+
+    - name: Decrypt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_one }}"'
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_default }}"'
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required: '
+        replace: 'login_node_required: true'
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'ipa_admin_password: ""'
+        replace: 'ipa_admin_password: "{{ ipa_passwd_complex }}"'
+      tags: TC_011
+      
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags kubernetes
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_011
+    
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags slurm,freeipa
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_011
+
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i test_inventory.yml
+      changed_when: false
+      tags: TC_011
+
+    - name: Delete the inventory file
+      changed_when: false
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_011
+
+# Testcase OMNIA_1.1_US_CRD_TC_012
+# Execute omnia.yml with separate servers for manager, compute and login node and an invalid ipa_admin_password
+- name: OMNIA_1.1_US_CRD_TC_012
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_012
+
+    - name: Creating test inventory file 
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }}  
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+      tags: TC_012
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_012
+
+    - name: Decrypt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_two }}"'
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_default }}"'
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: false'
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'ipa_admin_password: ""'
+        replace: 'ipa_admin_password: "{{ ipa_passwd_invalid }}"'
+      tags: TC_012
+      
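+    # With ipa_passwd_invalid the FreeIPA input validation is expected to reject
+    # the password; the assert that follows checks the corresponding error message.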
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags kubernetes
+      failed_when: false
+      changed_when: false
+      register: ipa_error
+      args: 
+        chdir: ../
+      tags: TC_012
+      
+    - name: Validate ipa admin password error
+      assert:
+        that:
+          - '" Incorrect format provided for ipa_admin_password" not in ipa_error.stdout'
+        fail_msg: "{{ ipa_password_error_fail_msg }}"
+        success_msg: "{{ ipa_password_error_success_msg }}"
+      tags: TC_012
+
+    - name: Delete the inventory file
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_012

+ 468 - 0
test/test_omnia_validation.yml

@@ -0,0 +1,468 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+# Testcase OMNIA_1.1_US_CRD_TC_001
+# Execute omnia.yml with separate servers for manager,compute,login,nfs node with default parameters
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: manager, compute
+  vars_files:
+    - test_vars/test_k8s_common_vars.yml
+    - test_vars/test_slurm_common_vars.yml
+  tasks:
+    - name: Checking K8s service status
+      systemd:
+        name: kubelet
+      register: kubelet_service
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating K8s service status
+      assert:
+        that:
+          - kubelet_service.status.ActiveState == 'active'
+        fail_msg: "{{ kubelet_service_fail_msg }}"
+        success_msg: "{{ kubelet_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking munge service status
+      systemd:
+        name: munge
+      register: munge_service
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating munge service status
+      assert:
+        that:
+          - munge_service.status.ActiveState == 'active'
+        fail_msg: "{{ munge_service_fail_msg }}"
+        success_msg: "{{ munge_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: manager
+  vars_files:
+    - test_vars/test_k8s_start_manager_workers_vars.yml
+    - test_vars/test_k8s_start_services_vars.yml
+    - test_vars/test_slurmexporter_vars.yml
+    - test_vars/test_slurm_start_services_vars.yml
+    - test_vars/test_login_server_vars.yml
+    - test_vars/test_slurm_manager_vars.yml
+    - test_vars/test_login_node_vars.yml
+
+  tasks:      
+    - name: Checking kube-system pods
+      command: kubectl get pods --namespace kube-system --field-selector=status.phase=Running
+      register: kube_system_pods
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating kube-controller-manager and kube-scheduler pods status
+      assert:
+        that:
+          - "'kube-scheduler' in kube_system_pods.stdout"
+          - "'kube-controller' in kube_system_pods.stdout"
+        fail_msg: "{{ controller_scheduler_status_fail_msg }}"
+        success_msg: "{{ controller_scheduler_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating coreDNS pods status
+      assert:
+        that:
+          - "'coredns' in kube_system_pods.stdout"
+        fail_msg: "{{ coredns_status_fail_msg }}"
+        success_msg: "{{ coredns_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking all running pods
+      command: kubectl get pods --all-namespaces --field-selector=status.phase=Running
+      register: running_pods
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating Metallb, Prometheus and MPI pods
+      assert:
+        that:
+          - "'metallb' in running_pods.stdout"
+          - "'prometheus' in running_pods.stdout"
+          - "'mpi-operator' in running_pods.stdout"
+        fail_msg: "{{ metallb_prometheus_mpi_pods_fail_msg }}"
+        success_msg: "{{ metallb_prometheus_mpi_pods_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating K8s dashboard
+      assert:
+        that:
+          - "'kubernetes-dashboard' in running_pods.stdout"
+        fail_msg: "{{ kubernetes_dashboard_fail_msg }}"
+        success_msg: "{{ kubernetes_dashboard_success_msg }}"
+      tags: VERIFY_OMNIA_01  
+    
+    - name: Verify slurm exporter status
+      systemd:
+        name: prometheus-slurm-exporter
+      register: slurm_exporter_status
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm exporter service status
+      assert:
+        that:
+          - slurm_exporter_status.status.ActiveState == 'active'
+        fail_msg: "{{ slurm_exporter_service_fail_msg }}"
+        success_msg: "{{ slurm_exporter_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
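+    # An environment variable exported in one shell task does not persist into
+    # later tasks, so the prometheus-server pod name is registered directly below.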
+    - name: Get prometheus-server pod name
+      shell: >-
+        kubectl get pods --namespace default
+        -l "app=prometheus,component=server" -o jsonpath="{.items[0].metadata.name}"
+      register: pod_name
+      changed_when: false
+      failed_when: false
+      tags: VERIFY_OMNIA_01
+
+    - name: Check if prometheus-server is in running state
+      command: kubectl get pods {{ pod_name.stdout }}
+      register: slurm_exporter_pod_status
+      ignore_errors: yes
+      changed_when: false
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm exporter job in k8s services
+      assert:
+        that:
+          - "'Error from server' not in slurm_exporter_pod_status.stdout"
+        fail_msg: "{{ slurm_exporter_job_fail_msg }}"
+        success_msg: "{{ slurm_exporter_job_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking mariadb service status
+      systemd:
+        name: mariadb
+      register: mariadb_service
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating mariadb service status
+      assert:
+        that:
+          - mariadb_service.status.ActiveState == 'active'
+        fail_msg: "{{ mariadb_service_fail_msg }}"
+        success_msg: "{{ mariadb_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking slurmctld service status
+      systemd:
+        name: slurmctld
+      register: slurmctld_service
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking slurmdbd service status
+      systemd:
+        name: slurmdbd
+      register: slurmdbd_service
+      tags: VERIFY_OMNIA_01
+
+    - name: Check if slurm is installed
+      command: sinfo -V
+      register: slurm_version
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating slurmctld service status
+      assert:
+        that:
+          - slurmctld_service.status.ActiveState == 'active'
+        fail_msg: "{{ slurmctld_service_fail_msg }}"
+        success_msg: "{{ slurmctld_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating slurmdbd service status
+      assert:
+        that:
+          - slurmdbd_service.status.ActiveState == 'active'
+        fail_msg: "{{ slurmdbd_service_fail_msg }}"
+        success_msg: "{{ slurmdbd_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm installation
+      assert:
+        that: "'command not found' not in slurm_version.stdout"
+        fail_msg: "{{ slurm_status_fail_msg }}"
+        success_msg: "{{ slurm_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
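+    # A throwaway nginx pod is created to confirm the cluster accepts and schedules
+    # workloads; the follow-up task only checks that the pod object exists.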
+    - name: Submit kubernetes job
+      command: kubectl run nginx --image=nginx --restart=Never
+      changed_when: false
+      failed_when: false
+      tags: VERIFY_OMNIA_01
+
+    - name: Check submitted kubernetes job status
+      command: kubectl get pod nginx
+      register: kubo_job
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate kubernetes job submission
+      assert:
+        that: "'pods nginx not found' not in kubo_job.stdout"
+        fail_msg: "{{ kubernetes_job_status_fail_msg }}"
+        success_msg: "{{ kubernetes_job_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+         
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: compute
+  vars_files:
+    - test_vars/test_slurm_workers_vars.yml
+  tasks:    
+    - name: Check if slurm is installed
+      command: sinfo -V
+      register: slurm_version
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking slurmd service status
+      service:
+        name: slurmd.service
+      register: slurmd_service
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm installation
+      assert:
+        that: "'command not found' not in slurm_version.stdout"
+        fail_msg: "{{ slurm_status_fail_msg }}"
+        success_msg: "{{ slurm_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating slurmd service status
+      assert:
+        that:
+          - slurmd_service.status.ActiveState == 'active'
+        fail_msg: "{{ slurmd_service_fail_msg }}"
+        success_msg: "{{ slurmd_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: manager, login_node
+  vars_files:
+    - test_vars/test_login_common_vars.yml
+    
+  tasks:    
+    - name: Checking installed Freeipa version
+      command: ipa --version
+      register: ipa_version
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating Freeipa Installation
+      assert:
+        that:
+          - "'command not found' not in ipa_version.stdout"
+        fail_msg: "{{ ipa_install_fail_msg }}"
+        success_msg: "{{ ipa_install_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Start and enable firewalld
+      service:
+        name: firewalld
+        state: started
+        enabled: yes
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking firewalld open ports on manager/login node
+      command: firewall-cmd --list-ports
+      changed_when: false
+      register: login_common_firewalld_ports
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating firewalld open ports on manager/login node
+      assert:
+        that:
+          - "'80/tcp' in login_common_firewalld_ports.stdout"
+          - "'443/tcp' in login_common_firewalld_ports.stdout"
+          - "'389/tcp' in login_common_firewalld_ports.stdout"
+          - "'636/tcp' in login_common_firewalld_ports.stdout"
+          - "'88/tcp' in login_common_firewalld_ports.stdout"
+          - "'464/tcp' in login_common_firewalld_ports.stdout"
+          - "'88/udp' in login_common_firewalld_ports.stdout"
+          - "'464/udp' in login_common_firewalld_ports.stdout"
+          - "'53/tcp' in login_common_firewalld_ports.stdout"
+          - "'53/udp' in login_common_firewalld_ports.stdout"
+          - "'123/udp' in login_common_firewalld_ports.stdout"
+          - "'7389/tcp' in login_common_firewalld_ports.stdout"
+        fail_msg: "{{ login_common_ports_status_fail_msg }}"
+        success_msg: "{{ login_common_ports_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Stop and disable firewalld
+      service:
+        name: firewalld
+        state: stopped
+        enabled: no
+      tags: VERIFY_OMNIA_01
+
+    - name: Check Freeipa server/client configuration
+      command: ipa help topics
+      register: ipa_config
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating Freeipa server/client Configuration
+      assert:
+        that:
+          - "'command not found' not in ipa_config.stdout"
+        fail_msg: "{{ ipa_configuration_fail_msg }}"
+        success_msg: "{{ ipa_configuration_success_msg }}"
+      failed_when: false
+      tags: VERIFY_OMNIA_01
+
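+    # kinit with the admin password authenticates against the FreeIPA KDC; a
+    # return code of 0 means the admin principal's ticket was granted.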
+    - name: Ensure host is present
+      shell: echo "{{ ipa_admin_password }}" | kinit admin
+      register: authen
+      changed_when: false
+      ignore_errors: true
+      tags: VERIFY_OMNIA_01
+   
+    - name: Validate admin user in ipa server/client
+      assert:
+        that:
+          - authen.rc == 0
+        fail_msg: "{{ admin_user_authentication_status_fail_msg }}"
+        success_msg: "{{ admin_user_authentication_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: login_node
+  gather_facts: false
+  vars_files:
+    - test_vars/test_login_node_vars.yml
+    - test_vars/test_slurm_workers_vars.yml
+    
+  tasks: 
+    - name: Checking slurmd service status
+      service:
+        name: slurmd.service
+      register: slurmd_service
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating slurmd service status
+      assert:
+        that:
+          - slurmd_service.status.ActiveState == 'active'
+        fail_msg: "{{ slurmd_service_fail_msg }}"
+        success_msg: "{{ slurmd_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
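+    # srun launches a hostname job on the normal partition from the login node;
+    # output containing the compute node FQDN shows slurm scheduling works end to end.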
+    - name: Submit slurm jobs
+      command: srun --nodes "{{ nodes }}" --ntasks-per-node "{{ ntasks }}" --partition normal hostname
+      register: job_status
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm job submission
+      assert:
+        that: "'compute.ipa.test' in job_status.stdout"
+        fail_msg: "{{ slurm_job_status_fail_msg }}"
+        success_msg: "{{ slurm_job_status_success_msg }}"
+      failed_when: false
+      tags: VERIFY_OMNIA_01
+
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: nfs_node
+  vars_files:
+    - test_vars/test_nfs_node_vars.yml
+   
+  tasks:
+      
+    - name: Checking rpcbind service status
+      systemd:
+        name: rpcbind
+      register: rpcbind_service
+      tags: VERIFY_OMNIA_01
+     
+    - name: Validating rpcbind service status
+      assert:
+        that:
+          - rpcbind_service.status.ActiveState == 'active'
+        fail_msg: "{{ rpcbind_service_fail_msg }}"
+        success_msg: "{{ rpcbind_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking nfs-server service status
+      systemd:
+        name: nfs-server
+      register: nfs_server_service
+      tags: VERIFY_OMNIA_01
+     
+    - name: Validating nfs-server service status
+      assert:
+        that:
+          - nfs_server_service.status.ActiveState == 'active'
+        fail_msg: "{{ nfs_server_service_fail_msg }}"
+        success_msg: "{{ nfs_server_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking nfs-lock service status
+      systemd:
+        name: nfs-lock
+      register: nfs_lock_service
+      tags: VERIFY_OMNIA_01
+     
+    - name: Validating nfs-lock service status
+      assert:
+        that:
+          - nfs_lock_service.status.ActiveState == 'active'
+        fail_msg: "{{ nfs_lock_service_fail_msg }}"
+        success_msg: "{{ nfs_lock_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking nfs-idmap service status
+      systemd:
+        name: nfs-idmap
+      register: nfs_idmap_service
+      tags: VERIFY_OMNIA_01
+     
+    - name: Validating nfs-idmap service status
+      assert:
+        that:
+          - nfs_idmap_service.status.ActiveState == 'active'
+        fail_msg: "{{ nfs_idmap_service_fail_msg }}"
+        success_msg: "{{ nfs_idmap_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
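+    # exportfs -v lists the active NFS exports; the validation below checks that
+    # the configured share directory is among them.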
+    - name: Check if nfs server setup is complete
+      command: exportfs -v
+      changed_when: false
+      register: nfs_share
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validate nfs server setup
+      assert:
+        that: "'{{ nfs_dir }}' in nfs_share.stdout"
+        fail_msg: "{{ nfs_server_fail_msg }}"
+        success_msg: "{{ nfs_server_success_msg }}"
+      tags: VERIFY_OMNIA_01      

+ 28 - 0
test/test_vars/test_login_common_vars.yml

@@ -0,0 +1,28 @@
+
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+ipa_admin_password: "omnia@1234"
+login_common_ports_status_fail_msg: "TCP/UDP ports are not open on the manager/login node"
+login_common_ports_status_success_msg: "TCP/UDP ports are open on the manager/login node"
+
+ipa_install_fail_msg: "FreeIPA is not installed"
+ipa_install_success_msg: "FreeIPA is installed"
+
+ipa_configuration_fail_msg: "FreeIPA is not configured properly"
+ipa_configuration_success_msg: "FreeIPA is configured properly"
+
+admin_user_authentication_status_fail_msg: "Admin user denied access"
+admin_user_authentication_status_success_msg: "Admin user successfully authenticated" 

+ 31 - 0
test/test_vars/test_login_node_vars.yml

@@ -0,0 +1,31 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+ipa_client_packages:
+  - bind-utils
+  - freeipa-client
+  - ipa-admintools
+
+freeipa_client_packages_status_success_msg: "Freeipa-client packages are installed"
+freeipa_client_packages_status_fail_msg: "Freeipa-client packages are not installed"
+
+nodes: "1"
+ntasks: "1"
+
+slurm_job_status_fail_msg: "Slurm jobs execution failed"
+slurm_job_status_success_msg: "Slurm jobs executed and running successfully"
+
+kubernetes_job_status_fail_msg: "Kubernetes job failed"
+kubernetes_job_status_success_msg: "Kubernetes job is running successfully"

+ 25 - 0
test/test_vars/test_login_server_vars.yml

@@ -0,0 +1,25 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+ipa_server_packages:
+  - bind
+  - bind-dyndb-ldap
+  - ipa-server-dns
+  - freeipa-server
+
+
+freeipa_server_packages_status_success_msg: "Freeipa-server packages are installed"
+freeipa_server_packages_status_fail_msg: "Freeipa-server packages are not installed"
+

+ 34 - 0
test/test_vars/test_nfs_node_vars.yml

@@ -0,0 +1,34 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+nfs_dir: "/me4_k8s_nfs"
+
+nfs_dir_fail_msg: "NFS share directory is not present"
+nfs_dir_success_msg: "NFS share directory is present"
+
+rpcbind_service_fail_msg: "Rpcbind service is not running"
+rpcbind_service_success_msg: "Rpcbind service is running"
+
+nfs_server_service_fail_msg: "nfs-server service is not running"
+nfs_server_service_success_msg: "nfs-server service is running"
+
+nfs_lock_service_fail_msg: "nfs-lock service is not running"
+nfs_lock_service_success_msg: "nfs-lock service is running"
+
+nfs_idmap_service_fail_msg: "nfs-idmap service is not running"
+nfs_idmap_service_success_msg: "nfs-idmap service is running"
+
+nfs_server_success_msg: "nfs server is setup successfully"
+nfs_server_fail_msg: "nfs server setup is unsuccessful"

+ 50 - 0
test/test_vars/test_omnia_1.1_vars.yml

@@ -0,0 +1,50 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+host1: 192.168.0.17
+host2: 192.168.0.19
+host3: 192.168.0.16
+host4: 192.168.0.22
+host5: 192.168.0.18
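+# host1-host5 above are the IP addresses of the nodes in this test bed; replace
+# them with addresses from your own environment before running the testcases.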
+
+config_filename: "omnia_config.yml"
+config_vaultname: ".omnia_vault_key"
+
+file_permission: "0644"
+db_passwd_invalid: "omnia123-"
+db_passwd_complex: "omnIaFD@123gn)opk"
+db_passwd_default: "password"
+k8s_cni_one: "flannel"
+k8s_cni_two: "calico"
+k8s_pod_network_cidr_default: "10.244.0.0/16"
+k8s_pod_network_cidr_other: "192.168.0.0/16"
+k8s_new_version: "1.19.3"
+ipa_passwd_invalid: "Omnia12-3"
+ipa_passwd_default: "omnia1234"
+ipa_passwd_complex: "Omnia@De9$123%"
+sleep_time: 5
+pod_time: 10
+
+compute_node_success_msg: "New compute node is successfully added to the cluster"
+compute_node_fail_msg: "New compute node failed to get added to the cluster"
+
+compute_node_del_success_msg: "New compute node is successfully deleted from the cluster"
+compute_node_del_fail_msg: "New compute node failed to get deleted from the cluster"
+
+ipa_password_error_fail_msg: "Invalid ipa_admin_password value was accepted"
+ipa_password_error_success_msg: "Invalid ipa_admin_password value was rejected as expected"
+
+mariadb_password_error_fail_msg: "Invalid mariadb_password value was accepted"
+mariadb_password_error_success_msg: "Invalid mariadb_password value was rejected as expected"