
Merge branch 'devel' into infiniband

Shubhangi Srivastava 3 years ago
parent
commit
e8609a3611
74 changed files, with 12029 lines added and 783 lines deleted
  1. .all-contributorsrc (+47, -3)
  2. .github/workflows/ansible-lint.yml (+3, -3)
  3. README.md (+7, -3)
  4. control_plane/input_params/base_vars.yml (+8, -1)
  5. control_plane/input_params/powervault_me4_vars.yml (+16, -8)
  6. control_plane/roles/control_plane_common/tasks/count_component_roles.yml (+37, -0)
  7. control_plane/roles/control_plane_common/tasks/fetch_base_inputs.yml (+10, -3)
  8. control_plane/roles/control_plane_common/tasks/main.yml (+9, -1)
  9. control_plane/roles/control_plane_common/tasks/validate_device_mapping_file.yml (+82, -0)
  10. control_plane/roles/control_plane_common/tasks/validate_host_mapping_file.yml (+179, -0)
  11. control_plane/roles/control_plane_common/vars/main.yml (+33, -1)
  12. control_plane/roles/control_plane_customiso/files/temp_centos7.cfg (+6, -1)
  13. control_plane/roles/control_plane_customiso/vars/main.yml (+7, -1)
  14. control_plane/roles/powervault_me4/tasks/map_volume.yml (+14, -6)
  15. control_plane/roles/powervault_me4/tasks/ports.yml (+5, -0)
  16. control_plane/roles/powervault_me4/tasks/pv_me4_prereq.yml (+4, -4)
  17. control_plane/roles/powervault_me4/tasks/pv_validation.yml (+15, -3)
  18. control_plane/roles/powervault_me4/tasks/volume.yml (+76, -7)
  19. control_plane/roles/powervault_me4/vars/main.yml (+1, -2)
  20. control_plane/roles/provision_cobbler/files/cobbler_configurations.yml (+22, -1)
  21. control_plane/roles/provision_idrac/tasks/check_prerequisites.yml (+48, -25)
  22. control_plane/roles/provision_idrac/tasks/deploy_os.yml (+24, -22)
  23. control_plane/roles/provision_idrac/tasks/import_scp.yml (+22, -13)
  24. control_plane/roles/provision_idrac/vars/main.yml (+6, -1)
  25. control_plane/test/temp_scp.xml (+10, -0)
  26. control_plane/test/test_control_plane.yml (+709, -0)
  27. control_plane/test/test_control_plane_validation.yml (+271, -0)
  28. control_plane/test/test_eth_mtu.yml (+47, -0)
  29. control_plane/test/test_ethernet_config.yml (+346, -0)
  30. control_plane/test/test_ethernet_fact.yml (+157, -0)
  31. control_plane/test/test_ethernet_inventory (+5, -0)
  32. control_plane/test/test_idrac.yml (+1395, -0)
  33. control_plane/test/test_idrac_inventory (+2, -0)
  34. control_plane/test/test_idrac_validation.yml (+197, -0)
  35. control_plane/test/test_powervault.yml (+1220, -0)
  36. control_plane/test/test_prepare.yml (+135, -0)
  37. control_plane/test/test_pv_inventory (+2, -0)
  38. control_plane/test/test_vars/base_vars.yml (+150, -0)
  39. control_plane/test/test_vars/login_vars.yml (+81, -0)
  40. control_plane/test/test_vars/test_control_plane_vars.yml (+94, -0)
  41. control_plane/test/test_vars/test_ethernet_vars.yml (+56, -0)
  42. control_plane/test/test_vars/test_idrac_vars.yml (+75, -0)
  43. control_plane/test/test_vars/test_powervault_vars.yml (+88, -0)
  44. control_plane/tools/roles/configure_new_devices/tasks/main.yml (+5, -1)
  45. control_plane/tools/roles/configure_new_devices/vars/main.yml (+2, -1)
  46. control_plane/tools/roles/idrac_2fa/tasks/configure_smtp.yml (+2, -2)
  47. docs/FAQ.md (+11, -2)
  48. examples/host_mapping_file_one_touch.csv (+3, -0)
  49. examples/host_mapping_file_os_provisioning.csv (+3, -0)
  50. examples/mapping_device_file.csv (+2, -0)
  51. examples/mapping_file.csv (+0, -2)
  52. omnia.yml (+1, -1)
  53. platforms/roles/jupyterhub/tasks/main.yml (+11, -30)
  54. roles/cluster_validation/tasks/fetch_powervault_status.yml (+91, -0)
  55. roles/cluster_validation/tasks/main.yml (+63, -8)
  56. roles/cluster_validation/tasks/validations.yml (+10, -2)
  57. roles/cluster_validation/vars/main.yml (+13, -4)
  58. roles/k8s_start_manager/files/kube-calico.yaml (+4090, -0)
  59. roles/k8s_start_manager/files/kube-flannel.yaml (+63, -376)
  60. roles/k8s_start_manager/tasks/main.yml (+50, -8)
  61. roles/k8s_start_manager/vars/main.yml (+6, -2)
  62. roles/k8s_start_services/tasks/check_k8s_pods.yml (+102, -0)
  63. roles/k8s_start_services/tasks/deploy_k8s_services.yml (+220, -0)
  64. roles/k8s_start_services/tasks/main.yml (+8, -232)
  65. roles/k8s_start_services/vars/main.yml (+5, -0)
  66. roles/login_server/tasks/install_packages.yml (+2, -1)
  67. roles/slurm_workers/tasks/main.yml (+8, -2)
  68. test/test_omnia_1.1.yml (+901, -0)
  69. test/test_omnia_validation.yml (+468, -0)
  70. test/test_vars/test_login_common_vars.yml (+28, -0)
  71. test/test_vars/test_login_node_vars.yml (+31, -0)
  72. test/test_vars/test_login_server_vars.yml (+25, -0)
  73. test/test_vars/test_nfs_node_vars.yml (+34, -0)
  74. test/test_vars/test_omnia_1.1_vars.yml (+50, -0)

+ 47 - 3
.all-contributorsrc

@@ -18,7 +18,8 @@
         "ideas",
         "maintenance",
         "mentoring",
-        "design"
+        "design",
+        "review"
       ]
     },
     {
@@ -122,7 +123,9 @@
       "avatar_url": "https://avatars.githubusercontent.com/u/73212230?v=4",
       "profile": "https://github.com/abhishek-s-a",
       "contributions": [
-        "code"
+        "code",
+        "doc",
+        "test"
       ]
     },
     {
@@ -160,7 +163,8 @@
       "avatar_url": "https://avatars.githubusercontent.com/u/72784834?v=4",
       "profile": "https://github.com/VishnupriyaKrish",
       "contributions": [
-        "code"
+        "code",
+        "test"
       ]
     },
     {
@@ -243,6 +247,46 @@
       "contributions": [
         "ideas"
       ]
+    },
+    {
+      "login": "Kavyabr23",
+      "name": "Kavyabr23",
+      "avatar_url": "https://avatars.githubusercontent.com/u/90390587?v=4",
+      "profile": "https://github.com/Kavyabr23",
+      "contributions": [
+        "code",
+        "test"
+      ]
+    },
+    {
+      "login": "vedaprakashanp",
+      "name": "vedaprakashanp",
+      "avatar_url": "https://avatars.githubusercontent.com/u/90596073?v=4",
+      "profile": "https://github.com/vedaprakashanp",
+      "contributions": [
+        "test",
+        "code"
+      ]
+    },
+    {
+      "login": "Bhagyashree-shetty",
+      "name": "Bhagyashree-shetty",
+      "avatar_url": "https://avatars.githubusercontent.com/u/90620926?v=4",
+      "profile": "https://github.com/Bhagyashree-shetty",
+      "contributions": [
+        "test",
+        "code"
+      ]
+    },
+    {
+      "login": "nihalranjan-hpc",
+      "name": "Nihal Ranjan",
+      "avatar_url": "https://avatars.githubusercontent.com/u/84398828?v=4",
+      "profile": "https://github.com/nihalranjan-hpc",
+      "contributions": [
+        "test",
+        "code"
+      ]
     }
   ],
   "contributorsPerLine": 7,

+ 3 - 3
.github/workflows/ansible-lint.yml

@@ -39,9 +39,9 @@ jobs:
         # [optional]
         # Arguments to override a package and its version to be set explicitly.
         # Must follow the example syntax.
-        #override-deps: |
-        #  ansible==2.9
-        #  ansible-lint==4.2.0
+        override-deps: |
+          ansible==2.10
+          ansible-lint==5.1.2
         # [optional]
         # Arguments to be passed to the ansible-lint
 

Some file diffs are not shown because too many files changed in this commit.
+ 7 - 3
README.md


+ 8 - 1
control_plane/input_params/base_vars.yml

@@ -68,6 +68,13 @@ awx_organization: "DellEMC"
 
 ### Usage: provision_cobbler, provision_idrac ###
 
+# This variable is used to set node provisioning method
+# It accepts values: idrac, pxe
+# Default value is "idrac"
+# If provisioning needs to be done through cobbler, set it to "pxe"
+# If idrac license is not present, provisioning mode will be set to "pxe"
+provision_method: "idrac"
+
 # This is the timezone that will be set during provisioning of OS
 # Available timezones are provided in control_plane/common/files/timezone.txt
 # Default timezone will be "GMT"
@@ -142,4 +149,4 @@ ib_network_dhcp_end_range: ""
 # Eg: xx:yy:zz:aa:bb,server,172.17.0.5
 # A template for mapping file exists in omnia/examples and is named as mapping_file.csv.
 # This depicts the path where user has kept the mapping file for DHCP configurations.
-ib_mapping_file_path: ""
+ib_mapping_file_path: ""
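
Note: the provision_method setting documented above is what the later hunks in this commit branch on (deploy_os.yml and import_scp.yml). A minimal sketch of that branching pattern, assuming base_vars.yml has already been loaded and validated by control_plane_common (illustrative only, not part of the diff):

- name: Report the selected provisioning path
  debug:
    msg: "Provisioning will use {{ 'Cobbler/PXE' if provision_method == 'pxe' else 'iDRAC OS deployment' }}"

- name: Run PXE-only preparation
  debug:
    msg: "PXE-specific steps would run here"
  when: provision_method == "pxe"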

+ 16 - 8
control_plane/input_params/powervault_me4_vars.yml

@@ -22,7 +22,7 @@ locale: "English"
 # Specify the system name to identify the system
 # By default it is set to "Uninitialized_Name"
 # Length should be less than 30 and it should not contain space.
-# This is "optional"
+# Optional
 powervault_me4_system_name: "Unintialized_Name"
 
 # Specify the snmp notification level
@@ -37,6 +37,11 @@ powervault_me4_system_name: "Unintialized_Name"
 # Compulsory
 powervault_me4_snmp_notify_level: "none"
 
+# The type of pool to be created on the powervault
+# It can be either linear or virtual.
+# Default: linear
+powervault_me4_pool_type: "linear"
+
 # Specify the required RAID Level
 # The different RAID levels and the min and max number of disks supported for each RAID are
 # r1/raid1: 2
@@ -53,24 +58,27 @@ powervault_me4_raid_levels: "raid1"
 # the enclosure number and disk range in the Enter Range of Disks text box. 
 # Use the format enclosure-number.disk-range,enclosure-number.disk-range. 
 # For example, to select disks 3-12 in enclosure 1 and 5-23 in enclosure 2, enter 1.3-12,2.5-23.
-# For ME4012 - 0.0-0.11,1.0-1.11 are the allowed values
+# For ME4012 - 0.0-0.11 are the allowed values
+# For RAID 10 disk range should be given in subgroups separated by colons with no spaces.
+# RAID-10 example:1.1-2:1.3-4:1.7,1.10
 # Default value is 0.1-2
 # Compulsory
 powervault_me4_disk_range: "0.1-2"
 
 # Specify the volume names
-# Cannot be left blank
 # the default value is "k8s_volume" and "slurm_volume"
 # Compulsory
 powervault_me4_k8s_volume_name: "k8s_volume"
 powervault_me4_slurm_volume_name: "slurm_volume"
 
 # Specify the disk group name
-# If left blank, system automatically assigns the name
+# Mandatory
 powervault_me4_disk_group_name: "omnia"
 
 # Specify the percentage for partition in disk
 # Default value is "60%"
+# Min: 5
+# Max: 90
 # Compulsory
 powervault_me4_disk_partition_size: "60"
 
@@ -79,12 +87,12 @@ powervault_me4_disk_partition_size: "60"
 # Compulsory
 powervault_me4_volume_size: "100GB"
 
-#Specify the pool for volume
+# Specify the pool for disk and volumes
 # Pool can either be  a/A or b/B.
-# Compulsory
+# Mandatory if powervault_me4_pool_type selected as "virtual".
 powervault_me4_pool: "a"
 
 # Specify the nic of the server with which Powervault is connected.
-# Default value is eno1.
+# Default value is em1.
 # Compulsory
-powervault_me4_server_nic: "eno1"
+powervault_me4_server_nic: "em1"

+ 37 - 0
control_plane/roles/control_plane_common/tasks/count_component_roles.yml

@@ -0,0 +1,37 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# limitations under the License.
+---
+
+- name: Count of manager nodes defined
+  set_fact:
+    count_of_manager: "{{ count_of_manager| int + 1 }}"
+  when: item == group_name_manager
+  tags: install
+
+- name: Count of compute nodes defined
+  set_fact:
+    count_of_compute: "{{ count_of_compute| int + 1 }}"
+  when: item == group_name_compute
+  tags: install
+
+- name: Count of login nodes defined
+  set_fact:
+    count_of_login: "{{ count_of_login| int + 1 }}"
+  when: item == group_name_login
+  tags: install
+
+- name: Count of NFS nodes defined
+  set_fact:
+    count_of_nfs_node: "{{ count_of_nfs_node| int + 1 }}"
+  when: item == group_name_nfs
+  tags: install
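
Note: the counters above are plain set_fact increments, so this file only works when it is included once per role entry with the counters pre-initialised. A short sketch of the intended call pattern, mirroring the include added in validate_host_mapping_file.yml later in this commit (illustrative only):

- name: Initialize role counters
  set_fact:
    count_of_manager: 0
    count_of_compute: 0
    count_of_login: 0
    count_of_nfs_node: 0

- name: Count each component role from the mapping file
  include_tasks: count_component_roles.yml
  loop: "{{ list_of_roles }}"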

+ 10 - 3
control_plane/roles/control_plane_common/tasks/fetch_base_inputs.yml

@@ -34,7 +34,8 @@
       mngmnt_network_dhcp_end_range | length < 1 or
       host_network_nic | length < 1 or
       host_network_dhcp_start_range | length < 1 or
-      host_network_dhcp_end_range | length < 1
+      host_network_dhcp_end_range | length < 1 or
+      provision_method | length < 1
 
 - name: Validate infiniband base_vars are not empty
   assert:
@@ -47,7 +48,6 @@
   register: ib_check
   when: ib_switch_support
 
-
 - name: Set facts to validate snmp support
   set_fact:
     snmp_enabled: false   
@@ -159,6 +159,13 @@
     success_msg: "{{ success_awx_organization }}"
     fail_msg: "{{ fail_awx_organization }}"
 
+- name: Assert provisioning method
+  assert:
+    that:
+      - provision_method == "pxe" or provision_method == "idrac"
+    success_msg: "{{ success_provision_method }}"
+    fail_msg: "{{ fail_provision_method }}"
+    
 - name: Check timezone file
   command: grep -Fx "{{ timezone }}" {{ role_path }}/files/timezone.txt
   ignore_errors: yes
@@ -452,4 +459,4 @@
       - ib_network_nic != host_network_nic
     success_msg: "{{ success_msg_different_nics_ib }}"
     fail_msg: "{{ fail_msg_different_nics_ib }}"
-  when: ib_switch_support
+  when: ib_switch_support

+ 9 - 1
control_plane/roles/control_plane_common/tasks/main.yml

@@ -38,8 +38,16 @@
   import_tasks: fetch_sm_inputs.yml
   when: ib_switch_support
 
+- name: Host mapping file validation
+  import_tasks: validate_host_mapping_file.yml
+  when: host_mapping_file_path |length >0
+
+- name: Device mapping file validation
+  import_tasks: validate_device_mapping_file.yml
+  when: mngmnt_mapping_file_path |length >0
+
 - name: Encrypt idrac_tools_vars.yml
   import_tasks: encrypt_idrac_tools_vars.yml
 
 - name: NFS Server setup for offline repo and awx
-  import_tasks: nfs_server_setup.yml
+  import_tasks: nfs_server_setup.yml

+ 82 - 0
control_plane/roles/control_plane_common/tasks/validate_device_mapping_file.yml

@@ -0,0 +1,82 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# limitations under the License.
+---
+- name: Check that device mapping file exists at mentioned path
+  stat:
+    path: "{{ mngmnt_mapping_file_path }}"
+  register: stat_result
+  tags: install
+
+- name: Fail if config file doesn't exist
+  fail:
+    msg: "{{ fail_msg_mapping_file + mngmnt_mapping_file_path }}"
+  when: not stat_result.stat.exists
+  tags: install
+
+- name: Read device mapping file from CSV file and return a dictionary
+  read_csv:
+    path: "{{ mngmnt_mapping_file_path }}"
+    key: "{{ mapping_file_key }}"
+  register: device_mapping_file
+  delegate_to: localhost
+  tags: install
+
+- name: Check if header is present in mapping file
+  shell:  set -o pipefail && awk 'NR==1 { print $1}' "{{ mngmnt_mapping_file_path }}"
+  register: mngmnt_header
+  changed_when: false
+  tags: install
+
+- name: Fail if header not in correct format
+  fail:
+    msg: "{{ fail_device_mapping_file_header }}"
+  when: mngmnt_header.stdout !=  device_mapping_header_format
+  tags: install
+
+- name: Check if mapping file is comma separated
+  shell: awk -F\, '{print NF-1}' "{{ mngmnt_mapping_file_path }}"
+  register: mngmnt_comma_seperated
+  changed_when: false
+  tags: install
+
+- name: Fail if not comma separated or if all fields are not given
+  fail:
+    msg: "{{ fail_mapping_file_field_seperation }}"
+  when: not(item =="1")
+  with_items: "{{ mngmnt_comma_seperated.stdout_lines }}"
+  tags: install
+
+- name: Initialize count variables
+  set_fact:
+    list_of_ips: []
+    count_total_items: "{{ device_mapping_file.dict |length }}"
+  tags: install
+
+- name: Create list of IPs in mapping file
+  set_fact:
+    list_of_ips: "{{ [ item.value.IP ] + list_of_ips }}"
+  loop: "{{ device_mapping_file.dict | dict2items }}"
+  loop_control:
+    label: "{{ item.value.MAC }}"
+  tags: install
+
+- name: Find count of unique IPs
+  set_fact:
+    count_of_unique_ip : "{{ list_of_ips| unique| length }}"
+  tags: install
+
+- name: Validation to check if unique IPs are provided for each node
+  fail:
+    msg: "{{ fail_mapping_file_duplicate_ip + mngmnt_mapping_file_path }}"
+  when: not(count_of_unique_ip|int == count_total_items|int)
+  tags: install
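
Note: the duplicate-IP check above boils down to comparing the number of CSV rows against the number of unique IPs. A self-contained sketch of the same pattern against a throw-away file (the /tmp path, MACs and IPs below are made up; the MAC,IP header matches device_mapping_header_format from vars/main.yml):

- name: Write a sample device mapping file (illustrative only)
  copy:
    dest: /tmp/sample_device_mapping.csv
    mode: '0644'
    content: |
      MAC,IP
      aa:bb:cc:dd:ee:01,172.17.0.10
      aa:bb:cc:dd:ee:02,172.17.0.11

- name: Read it back keyed on MAC
  read_csv:
    path: /tmp/sample_device_mapping.csv
    key: MAC
  register: device_mapping_file

- name: Fail when any IP repeats
  vars:
    list_of_ips: "{{ device_mapping_file.dict | dict2items | map(attribute='value.IP') | list }}"
  fail:
    msg: "Duplicate IP found in the mapping file"
  when: (list_of_ips | unique | length) != (device_mapping_file.dict | length)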

+ 179 - 0
control_plane/roles/control_plane_common/tasks/validate_host_mapping_file.yml

@@ -0,0 +1,179 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# limitations under the License.
+---
+- name: Check that host mapping file exists at mentioned path
+  stat:
+    path: "{{ host_mapping_file_path }}"
+  register: stat_result
+  tags: install
+
+- name: Fail if config file doesn't exist
+  fail:
+    msg: "{{ fail_msg_mapping_file + host_mapping_file_path }}"
+  when: not stat_result.stat.exists
+  tags: install
+
+- name: Read host mapping file from CSV file and return a dictionary
+  read_csv:
+    path: "{{ host_mapping_file_path }}"
+    key: "{{ mapping_file_key }}"
+  register: mapping_file
+  delegate_to: localhost
+  tags: install
+
+- name: Initialize variable for role support in mapping file
+  set_fact:
+    component_role_support: false
+  tags: install
+
+- name: Check if header is present in mapping file
+  shell:  set -o pipefail && awk 'NR==1 { print $1}' "{{ host_mapping_file_path }}"
+  register: mngmnt_header
+  changed_when: false
+  tags: install
+
+- name: Fail if header not in correct format
+  assert:
+    that: (mngmnt_header.stdout ==  host_mapping_header_format) or (mngmnt_header.stdout == host_mapping_header_with_role_format)
+    fail_msg: "{{ fail_mapping_file_header }}"
+  tags: install
+
+- name: Check if mapping file is comma separated
+  shell: awk -F\, '{print NF-1}' "{{ host_mapping_file_path }}"
+  register: mngmnt_comma_seperated
+  changed_when: false
+  tags: install
+
+- name: Set variable if component roles given in mapping file
+  set_fact:
+    component_role_support: true
+  when: mngmnt_header.stdout == host_mapping_header_with_role_format
+  tags: install
+
+- name: Fail if not comma separated or if all fields are not given for MAC,Hostname,IP,Component_role
+  fail:
+    msg: "{{ fail_mapping_file_field_seperation }}"
+  when: not(item =="3") and not (item == "-1") and component_role_support
+  with_items: "{{ mngmnt_comma_seperated.stdout_lines }}"
+  tags: install
+
+- name: Fail if not comma separated or if all fields are not given for MAC,Hostname,IP
+  fail:
+    msg: "{{ fail_mapping_file_field_seperation }}"
+  when: not(item =="2") and not (item == "-1") and not(component_role_support)
+  with_items: "{{ mngmnt_comma_seperated.stdout_lines }}"
+  tags: install
+
+- name: Initialize count variables
+  set_fact:
+    list_of_ips: []
+    list_of_roles: []
+    list_of_hostnames: []
+    count_of_manager: 0
+    count_of_compute: 0
+    count_of_nfs_node: 0
+    count_of_login: 0
+    count_total_items: "{{ mapping_file.dict |length }}"
+  tags: install
+
+- name: Create list of IPs and component roles and hostnames defined in mapping file
+  set_fact:
+    list_of_ips: "{{ [ item.value.IP ] + list_of_ips }}"
+    list_of_hostnames: "{{ [ item.value.Hostname ] + list_of_hostnames }}"
+  loop: "{{ mapping_file.dict | dict2items }}"
+  loop_control:
+    label: "{{ item.value.MAC }}"
+  tags: install
+
+- name: Create list of component roles defined in mapping file
+  set_fact:
+    list_of_roles: "{{ [ item.value.Component_role ] + list_of_roles }}"
+  loop: "{{ mapping_file.dict | dict2items }}"
+  loop_control:
+    label: "{{ item.value.MAC }}"
+  when: component_role_support
+  tags: install
+
+- name: Assert hostnames
+  assert:
+    that:
+      - '"_" not in item'
+      - '"." not in item'
+      - '" " not in item'
+    quiet: yes
+    fail_msg: "{{ fail_mapping_file_hostname_chars + item }}"
+  with_items: "{{ list_of_hostnames }}"
+  tags: install
+
+- name: Find count of unique IPs
+  set_fact:
+    count_of_unique_ip : "{{ list_of_ips| unique| length }}"
+  tags: install
+
+- name: Validation to check if unique IPs are provided for each node
+  fail:
+    msg: "{{ fail_mapping_file_duplicate_ip + host_mapping_file_path }}"
+  when: not(count_of_unique_ip|int == count_total_items|int)
+  tags: install
+
+- name: Find count of unique hostnames
+  set_fact:
+    count_of_unique_hostnames : "{{ list_of_hostnames | unique | length }}"
+  tags: install
+
+- name: Validation to check if unique hostnames are provided for each node
+  fail:
+    msg: "{{ fail_mapping_file_duplicate_hostname }}"
+  when: not(count_of_unique_hostnames|int == count_total_items| int)
+  tags: install
+
+- name: Find count of each component role defined in mapping file
+  include_tasks: count_component_roles.yml
+  loop: "{{ list_of_roles }}"
+  when: component_role_support
+  tags: install
+
+- block:
+  - name: Validation to check if component roles for each node is defined
+    fail:
+      msg: "{{ fail_mapping_file_roles_error }}"
+    when: not( count_total_items|int == (count_of_manager|int + count_of_compute|int + count_of_login|int + count_of_nfs_node|int))
+
+  - name: Validation to check number of manager nodes defined
+    fail:
+      msg: "{{ fail_mapping_file_manager_role }}"
+    when: not (count_of_manager | int  == 1)
+
+  - name: Validation to check number of compute nodes defined
+    fail:
+      msg: "{{ fail_mapping_file_compute_role }}"
+    when: count_of_compute|int  < 1
+
+  - name: Validation to check number of login nodes defined
+    fail:
+      msg: "{{ fail_mapping_file_login_role }}"
+    when: not ( count_of_login|int == 1)
+
+  - name: Validation to check number of nfs nodes defined
+    fail:
+      msg: "{{ fail_mapping_file_nfs_role }}"
+    when: powervault_support and not (count_of_nfs_node|int == 1)
+  tags: install
+
+  rescue:
+  - name: Count of roles defined
+    fail:
+      msg: "{{ count_of_roles_defined }}"
+    tags: install
+
+  when: component_role_support
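
Note: a host mapping file that passes all of the checks above needs the MAC,Hostname,IP,Component_role header, unique IPs and hostnames (no '_', '.' or spaces), exactly one manager and one login_node, and at least one compute entry. A hedged example, written as a copy task so it stays in the same language as the role; all MACs, IPs, hostnames and the /tmp path are placeholders:

- name: Write a sample host mapping file (illustrative only)
  copy:
    dest: /tmp/sample_host_mapping.csv
    mode: '0644'
    content: |
      MAC,Hostname,IP,Component_role
      aa:bb:cc:dd:ee:11,node001,172.17.0.11,manager
      aa:bb:cc:dd:ee:12,node002,172.17.0.12,compute
      aa:bb:cc:dd:ee:13,node003,172.17.0.13,login_node
      aa:bb:cc:dd:ee:14,node004,172.17.0.14,nfs_node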

+ 33 - 1
control_plane/roles/control_plane_common/vars/main.yml

@@ -89,6 +89,8 @@ success_msg_k8s_pod_network_cidr: "Appliance k8s pod network cidr validated"
 fail_msg_k8s_pod_network_cidr: "Failed. Incorrect appliance k8s pod network cidr provided in base_vars.yml"
 success_awx_organization: "awx organization validated"
 fail_awx_organization: "Failed. Incorrect format in awx organization"
+success_provision_method: "Provision method validated"
+fail_provision_method: "Failed. Provision method can either be set to idrac or pxe"
 success_timezone_msg: "timezone validated"
 fail_timezone_msg: "Failed. Incorrect timezone provided. Please check the file timezone.txt in control_plane/roles/control_plane_common/files/ folder"
 fail_language: "Failed. Only en-US(english) language supported"
@@ -137,4 +139,34 @@ exports_file_path: /etc/exports
 nfs_services:
   - mountd
   - rpc-bind
-  - nfs
+  - nfs
+
+# Usage: validate_host_mapping_file.yml
+fail_msg_mapping_file: "Mapping file doesn't exist at given path: "
+mapping_file_key: "MAC"
+fail_mapping_file_header: "Header of csv file is not in correct format.
+                          It should be of the format: MAC,Hostname,IP,Component_role or MAC,Hostname,IP"
+host_mapping_header_format: "MAC,Hostname,IP"
+host_mapping_header_with_role_format: "MAC,Hostname,IP,Component_role"
+fail_mapping_file_field_seperation: "Failed: Mapping file should be comma separated and all fields must be filled."
+fail_mapping_file_duplicate_ip: "Failed: Duplicate ip exists. Please verify following mapping file again: "
+fail_mapping_file_duplicate_hostname: "Failed: Duplicate hostname exists. Please verify host mapping file again."
+fail_mapping_file_hostname_chars: "Hostname should not contain _ or . or space as it will cause error with slurm and K8s. Found in: "
+fail_mapping_file_roles_error: "Failed. Define correct Component Roles for each node.
+                                Component roles can only take values: {{ group_name_manager }}, {{group_name_compute}},
+                                 {{ group_name_login }}, {{ group_name_nfs }}"
+fail_mapping_file_manager_role: "Exactly 1 manager node must be defined"
+fail_mapping_file_compute_role: "At least 1 compute node must be defined"
+fail_mapping_file_login_role: "Exactly 1 login node must be defined"
+fail_mapping_file_nfs_role: "Exactly 1 nfs node must be defined"
+count_of_roles_defined: "Component Roles defined: Manager Node: {{ count_of_manager }},
+                        Compute Nodes: {{ count_of_compute }}, Login Node: {{ count_of_login }},
+                        Nfs Node: {{ count_of_nfs_node }}, Total Nodes: {{ count_total_items }} "
+group_name_manager: "manager"
+group_name_compute: "compute"
+group_name_login: "login_node"
+group_name_nfs: "nfs_node"
+
+# Usage: validate_device_mapping_file.yml
+fail_device_mapping_file_header: "Failed: Header (MAC,IP) should be present in the mapping file."
+device_mapping_header_format: "MAC,IP"

+ 6 - 1
control_plane/roles/control_plane_customiso/files/temp_centos7.cfg

@@ -53,4 +53,9 @@ reboot
 %packages
 @core
 net-tools
-%end
+%end
+
+%post --log=/root/ks-post.log
+yum groupinstall "Infiniband Support" -y
+yum install infiniband-diags perftest qperf -y
+%end
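
Note: the %post section above installs the InfiniBand userland (infiniband-diags, perftest, qperf) into the kickstarted node. A minimal follow-up check that could be run once the node is reachable from an Ansible inventory; the group name below is hypothetical (illustrative only):

- name: Verify InfiniBand tooling on provisioned nodes
  hosts: provisioned_nodes   # hypothetical group name
  gather_facts: false
  tasks:
    - name: Query local HCA state with ibstat (shipped by infiniband-diags)
      command: ibstat
      register: ibstat_out
      changed_when: false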

+ 7 - 1
control_plane/roles/control_plane_customiso/vars/main.yml

@@ -32,9 +32,15 @@ host_nic:
  - em4
  - p4p1
  - p4p2
+ - p3p1
+ - p3p2
+ - p2p1
+ - p2p2
+ - p1p2
+ - p1p1
 
 #Usage: create_unattended_iso.yml
 unattended_iso_filename: unattended_centos7.iso
 custom_iso_success_msg: "Unattended ISO file created successfully"
 custom_iso_fail_msg: "Unattended ISO file creation failed. Ensure /mnt/iso path is mounted with valid centos minimal ISO file."
-management_station_ip_file: "management_station_ip.txt"
+management_station_ip_file: "management_station_ip.txt"

+ 14 - 6
control_plane/roles/powervault_me4/tasks/map_volume.yml

@@ -24,6 +24,7 @@
   changed_when: false
   no_log: true
   register: config_content
+  delegate_to: localhost
   run_once: true
 
 - name: Decrpyt login_vars.yml
@@ -32,6 +33,7 @@
     --vault-password-file {{ login_pv_vault_file }}
   changed_when: false
   run_once: true
+  delegate_to: localhost
   when: "'$ANSIBLE_VAULT;' in config_content.stdout"
 
 - name: Include variable file login_vars.yml
@@ -57,9 +59,18 @@
   delegate_to: localhost
   tags: install
 
+- name: Get map port
+  set_fact:
+    map_port: "{{ item.0 }}"
+  when: hostvars['pv']['map_ip'] == item.1
+  with_together:
+    - "{{ up_port }}"
+    - "{{ set_port_ip }}"
+  register: output
+
 - name: Map volume
   uri:
-    url: https://{{ groups['powervault_me4'][0] }}/api/map/volume/{{ powervault_me4_k8s_volume_name }}/access/{{ access }}/ports/{{ item.0 }}/lun/{{ lun1 }}/initiator/{{ hostvars['server_iqdn_id']['server_iqdn'] }}
+    url: https://{{ groups['powervault_me4'][0] }}/api/map/volume/{{ powervault_me4_k8s_volume_name }}/access/{{ access }}/ports/{{ map_port }}/lun/{{ lun1 }}/initiator/{{ hostvars['server_iqdn_id']['server_iqdn'] }}
     method: GET
     body_format: json
     validate_certs: no
@@ -67,14 +78,12 @@
     headers:
       {'sessionKey': "{{ map_session_key.json.status[0].response }}", 'datatype':'json'}
   register: map_vol1
-  with_together:
-    - "{{ up_port }}"
   delegate_to: localhost
   tags: install
 
 - name: Map volume
   uri:
-    url: https://{{ groups['powervault_me4'][0] }}/api/map/volume/{{ powervault_me4_slurm_volume_name }}/access/{{ access }}/ports/{{ item.0 }}/lun/{{ lun2 }}/initiator/{{ hostvars['server_iqdn_id']['server_iqdn']  }}
+    url: https://{{ groups['powervault_me4'][0] }}/api/map/volume/{{ powervault_me4_slurm_volume_name }}/access/{{ access }}/ports/{{ map_port }}/lun/{{ lun2 }}/initiator/{{ hostvars['server_iqdn_id']['server_iqdn']  }}
     method: GET
     body_format: json
     validate_certs: no
@@ -82,8 +91,6 @@
     headers:
       {'sessionKey': "{{ map_session_key.json.status[0].response }}", 'datatype':'json'}
   register: map_vol2
-  with_together:
-    - "{{ up_port }}"
   delegate_to: localhost
   tags: install
 
@@ -93,4 +100,5 @@
     --vault-password-file {{ login_pv_vault_file }}
   changed_when: false
   run_once: true
+  delegate_to: localhost
   when: "'$ANSIBLE_VAULT;' in config_content.stdout"

+ 5 - 0
control_plane/roles/powervault_me4/tasks/ports.yml

@@ -24,6 +24,7 @@
   changed_when: false
   no_log: true
   register: config_content
+  delegate_to: localhost
   run_once: true
 
 - name: Decrpyt login_vars.yml
@@ -32,6 +33,7 @@
     --vault-password-file {{ login_pv_vault_file }}
   changed_when: false
   run_once: true
+  delegate_to: localhost
   when: "'$ANSIBLE_VAULT;' in config_content.stdout"
 
 - name: Include variable file login_vars.yml
@@ -53,6 +55,7 @@
       {'datatype': 'json'}
     validate_certs: no
   register: port_session_key
+  delegate_to: localhost
   tags: install
 
 - name: Show ports
@@ -65,6 +68,7 @@
     headers:
       {'sessionKey': "{{ port_session_key.json.status[0].response }}", 'datatype':'json'}
   register: show_ports
+  delegate_to: localhost
   tags: install
 
 - name: Up ports
@@ -95,4 +99,5 @@
   with_together: 
     - "{{ set_port_ip }}"
     - "{{ up_port }}"
+  delegate_to: localhost
   tags: install

+ 4 - 4
control_plane/roles/powervault_me4/tasks/pv_me4_prereq.yml

@@ -57,14 +57,14 @@
 
 - name: Get the product id
   set_fact:
-    pv_id: system_info.json.system[0]['product-id']
+    pv_id: "{{ system_info.json.system[0]['product-id'] }}"
 
 - name: Verify the product id and model no. of device
   fail:
     msg: "{{ fail_pv_support }}"
   when:
-    - scsi_product_id in system_info.json.system[0]['scsi-product-id']
-    - pv_id  == "ME4084" or pv_id == "ME4024"  or pv_id == "ME4012"
+    - scsi_product_id not in system_info.json.system[0]['scsi-product-id']
+    - pv_id != "ME4084" and pv_id != "ME4024" and pv_id != "ME4012"
 
 - name: Set system name
   uri:
@@ -76,5 +76,5 @@
     headers:
       {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
   register: system_name
-  when: powervault_me4_system_name != ""
+  when: powervault_me4_system_name
   tags: install
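
Note on the pv_id change above: without the quotes and braces, set_fact stores the literal text of the expression rather than the product id, so the model comparison could never see the real value. A minimal illustration of the difference (illustrative only):

- name: Stores the literal string, not the value (old, buggy form)
  set_fact:
    pv_id_literal: system_info.json.system[0]['product-id']

- name: Stores the looked-up value (corrected form used above)
  set_fact:
    pv_id: "{{ system_info.json.system[0]['product-id'] }}"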

+ 15 - 3
control_plane/roles/powervault_me4/tasks/pv_validation.yml

@@ -33,6 +33,7 @@
 - name: Check if system name has space
   shell: echo {{ powervault_me4_system_name }} | grep '\s' -c
   register: space_count
+  changed_when: false
   ignore_errors: true
   tags: install
 
@@ -46,6 +47,7 @@
 - name: Check if volume name has space
   shell: echo {{ powervault_me4_k8s_volume_name }} | grep '\s' -c
   register: vol_count1
+  changed_when: false
   ignore_errors: true
   tags: install
 
@@ -59,6 +61,7 @@
 - name: Check if volume name has space
   shell: echo {{ powervault_me4_slurm_volume_name }} | grep '\s' -c
   register: vol_count2
+  changed_when: false
   ignore_errors: true
   tags: install
 
@@ -80,6 +83,7 @@
   assert:
     that:
       - disk_count.stdout == "0"
+      - powervault_me4_disk_group_name | length > 1
       - powervault_me4_disk_group_name | length < 30
     msg: "{{ system_name_wrong }}" 
 
@@ -89,7 +93,7 @@
       - powervault_me4_snmp_notify_level | length >1
       - powervault_me4_snmp_notify_level == "crit" or powervault_me4_snmp_notify_level == "error" or powervault_me4_snmp_notify_level == "warn" or powervault_me4_snmp_notify_level == "resolved" or powervault_me4_snmp_notify_level == "info" or powervault_me4_snmp_notify_level == "none"
     fail_msg: "{{ snmp_wrong_value }}"
-    success_msg: "{{ snmp_success }}" 
+    success_msg: "{{ snmp_success }}"
 
 - name: Assert RAID value
   assert:
@@ -113,12 +117,20 @@
     that: 
       - powervault_me4_pool == "a" or powervault_me4_pool == "A" or powervault_me4_pool == "b" or powervault_me4_pool == "B"
     msg: "{{ wrong_pool }}"
+  when: powervault_me4_pool_type == "virtual"
+
+- name: Check pool type
+  assert:
+    that:
+      - powervault_me4_pool_type | length > 1
+      - powervault_me4_pool_type | lower == "virtual" or powervault_me4_pool_type | lower == "linear"
+    msg: "{{ wrong_pool_type }}"
 
 - name: Check parition percentage
   assert:
     that:
       - powervault_me4_disk_partition_size|int
-      - powervault_me4_disk_partition_size|int < 99
+      - powervault_me4_disk_partition_size|int < 90
       - powervault_me4_disk_partition_size|int > 5
     msg: "{{ wrong_partition }}"
 
@@ -133,4 +145,4 @@
 - name: Assert the nic provided
   assert:
     that:
-      - powervault_me4_server_nic | length > 2
+      - powervault_me4_server_nic | length > 2

+ 76 - 7
control_plane/roles/powervault_me4/tasks/volume.yml

@@ -30,22 +30,45 @@
   register: vol_session_key
   tags: install
 
-- name: Add disk group
+- name: Add disk group in virtual pool
   uri:
-    url: https://{{ inventory_hostname }}/api/add/disk-group/type/{{ type }}/disks/{{ powervault_me4_disk_range }}/level/{{ powervault_me4_raid_levels }}/pool/{{ powervault_me4_pool }}/{{ powervault_me4_disk_group_name }}
+    url: https://{{ inventory_hostname }}/api/add/disk-group/type/{{ powervault_me4_pool_type }}/disks/{{ powervault_me4_disk_range }}/level/{{ powervault_me4_raid_levels }}/pool/{{ powervault_me4_pool }}/{{ powervault_me4_disk_group_name }}
     method: GET
     body_format: json
     validate_certs: no
     use_proxy: no
     headers:
       {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
-  register: pv_disk
+  register: pv_disk1
+  when: powervault_me4_pool_type|lower == "virtual"
+  tags: install
+
+- name: Add disk group in linear pool
+  uri:
+    url: https://{{ inventory_hostname }}/api/add/disk-group/type/{{ powervault_me4_pool_type }}/disks/{{ powervault_me4_disk_range }}/level/{{ powervault_me4_raid_levels }}/{{ powervault_me4_disk_group_name }}
+    method: GET
+    body_format: json
+    validate_certs: no
+    use_proxy: no
+    headers:
+      {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
+  register: pv_disk2
+  when: powervault_me4_pool_type|lower == "linear"
   tags: install
 
 - name: Assert if disk group created or not
   fail:
-    msg: "{{ pv_disk.json.status[0].response }}"
-  when:  pv_disk.json.status[0] ['response-type'] == "Error"
+    msg: "{{ pv_disk1.json.status[0].response }}"
+  when:
+    - powervault_me4_pool_type|lower == "virtual"
+    - pv_disk1.json.status[0] ['response-type'] == "Error"
+
+- name: Assert if disk group created or not
+  fail:
+    msg: "{{ pv_disk2.json.status[0].response }}"
+  when:
+    - powervault_me4_pool_type|lower == "linear"
+    - pv_disk2.json.status[0] ['response-type'] == "Error"
 
 - name: Create volume1
   uri:
@@ -57,6 +80,7 @@
     headers:
       {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
   register: pv_vol1
+  when: powervault_me4_pool_type|lower == "virtual"
   tags: install
 
 - name: Create volume2
@@ -69,14 +93,59 @@
     headers:
       {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
   register: pv_vol2
+  when: powervault_me4_pool_type|lower == "virtual"
+  tags: install
+
+- name: Create volume1
+  uri:
+    url: https://{{ inventory_hostname }}/api/create/volume/size/{{ powervault_me4_volume_size }}/pool/{{ powervault_me4_disk_group_name }}/{{ powervault_me4_k8s_volume_name }}
+    method: GET
+    body_format: json
+    validate_certs: no
+    use_proxy: no
+    headers:
+      {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
+  register: pv_vol3
+  when: powervault_me4_pool_type|lower == "linear"
+  tags: install
+
+- name: Create volume2
+  uri:
+    url: https://{{ inventory_hostname }}/api/create/volume/size/{{ powervault_me4_volume_size }}/pool/{{ powervault_me4_disk_group_name }}/{{ powervault_me4_slurm_volume_name }}
+    method: GET
+    body_format: json
+    validate_certs: no
+    use_proxy: no
+    headers:
+      {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
+  register: pv_vol4
+  when: powervault_me4_pool_type|lower == "linear"
   tags: install
 
 - name: Assert if k8s_volume created correctly
   fail:
     msg: "{{ pv_vol1.json.status[0].response }}"
-  when: pv_vol1.json.status[0]['response-type'] == "Error"
+  when:
+    - powervault_me4_pool_type| lower == "virtual"
+    - pv_vol1.json.status[0]['response-type'] == "Error"
 
 - name: Assert if slurm_volume created correctly
   fail:
     msg: "{{ pv_vol2.json.status[0].response }}"
-  when: pv_vol2.json.status[0]['response-type'] == "Error"
+  when:
+    - powervault_me4_pool_type| lower == "virtual"
+    - pv_vol2.json.status[0]['response-type'] == "Error"
+
+- name: Assert if k8s_volume created correctly
+  fail:
+    msg: "{{ pv_vol3.json.status[0].response }}"
+  when:
+    - powervault_me4_pool_type| lower == "linear"
+    - pv_vol3.json.status[0]['response-type'] == "Error"
+
+- name: Assert if slurm_volume created correctly
+  fail:
+    msg: "{{ pv_vol4.json.status[0].response }}"
+  when:
+    - powervault_me4_pool_type|lower == "linear"
+    - pv_vol4.json.status[0]['response-type'] == "Error"

+ 1 - 2
control_plane/roles/powervault_me4/vars/main.yml

@@ -23,7 +23,7 @@ correct_disk_range: "Succes: Disk range is correct"
 wrong_pool: "Failed: Given pool value is wrong"
 wrong_partition: "Failed: Given partition is wrong"
 wrong_vol_size: "Failed: Given volume size is wrong"
-
+wrong_pool_type: "Failed: Given pool type value is wrong"
 
 # Usage: pv_me4_prereq.yml
 scsi_product_id: ME4
@@ -37,7 +37,6 @@ base_pv_file: "{{ role_path }}/../../input_params/base_vars.yml"
 # Usage: volume.yml
 lun1: 0
 lun2: 1
-type: virtual
 
 # Usage: ports.yml
 port_ip: 192.168.25.

+ 22 - 1
control_plane/roles/provision_cobbler/files/cobbler_configurations.yml

@@ -105,6 +105,27 @@
       mode: 0775
     tags: install
 
+  - name: Pxe menu
+    copy:
+      src: "/root/omnia/control_plane/roles/provision_cobbler/files/menu.yml"
+      dest: "/etc/cobbler/boot_loader_conf/pxedefault.template"
+      mode: 0775
+    tags: install
+
+  - name: Assign default grub option
+    replace:
+      path: "/var/lib/cobbler/grub_config/grub/grub.cfg"
+      regexp: "^set default=\'local\'"
+      replace: "set default=\'1\'"
+    tags: install
+
+  - name: Assign default grub timeout
+    replace:
+      path: "/var/lib/cobbler/grub_config/grub/grub.cfg"
+      regexp: '^set timeout=80'
+      replace: 'set timeout=10'
+    tags: install
+
   - name: Syncing of cobbler
     command: cobbler sync
     changed_when: false 
@@ -144,4 +165,4 @@
     cron:
       name: Create inventory
       minute: "*/5"
-      job: "{{ ansible_playbook_path.stdout.split(' ')[1] }} /root/inventory_creation.yml"
+      job: "{{ ansible_playbook_path.stdout.split(' ')[1] }} /root/inventory_creation.yml"

+ 48 - 25
control_plane/roles/provision_idrac/tasks/check_prerequisites.yml

@@ -42,30 +42,6 @@
 - name: Set management_station_ip
   set_fact:
     management_station_ip: "{{ fetch_ip.stdout }}"
-    
-- name: Check NFS share access
-  idrac_server_config_profile:
-    idrac_ip: "{{ inventory_hostname }}"
-    idrac_user: "{{ idrac_username }}"
-    idrac_password: "{{ idrac_password }}"
-    share_name: "{{ management_station_ip }}:{{ nfs_share_offline_repo }}"
-    command: "export"
-    scp_components: "BIOS"
-    scp_file: "{{ nfs_check_file }}"
-    export_format: XML
-    export_use: Default
-    job_wait: true
-  register: nfs_check
-  ignore_errors: true
-  until: not nfs_check.failed
-  retries: 3
-
-- name: Missing entries in nfs exports
-  fail:
-    msg: "{{ missing_exports_fail_msg }}"
-  when:
-    - nfs_check_key in nfs_check.msg or
-      nfs_check_key in nfs_check.scp_status.Status
 
 - name: Initialize variables
   set_fact:
@@ -77,6 +53,8 @@
     datacenter_license: false
     provision_status: false
     model_status: false
+    idrac_license_name: ""
+    deploy_os_status: false
 
 - block:
     - name: Check tower_cli.cfg is encrypted
@@ -192,6 +170,7 @@
     - name: Set enterprise license status
       set_fact:
         enterprise_license: true
+        idrac_license_name: "{{ idrac_info.system_info.License[my_idx1].LicenseDescription }}"
       with_items: "{{ idrac_info.system_info.License }}"
       when:
         - '"iDRAC" in idrac_info.system_info.License[my_idx1].LicenseDescription'
@@ -204,6 +183,7 @@
     - name: Set datacenter license status
       set_fact:
         datacenter_license: true
+        idrac_license_name: "{{ idrac_info.system_info.License[my_idx2].LicenseDescription }}"
       with_items: "{{ idrac_info.system_info.License }}"
       when:
         - '"iDRAC" in idrac_info.system_info.License[my_idx2].LicenseDescription'
@@ -212,4 +192,47 @@
         - '"Healthy" in idrac_info.system_info.License[my_idx2].PrimaryStatus'
       loop_control:
         index_var: my_idx2
-  when: not provision_status
+
+    - name: Change provision mode in absence of license
+      set_fact:
+        provision_method: "pxe"
+      when: not (enterprise_license or datacenter_license)
+
+    - name: Firmware version of iDRAC9 not supported
+      debug:
+        msg: "{{ idrac9_firmware_not_supported_msg }}"
+      when:
+        - '"iDRAC9" in idrac_license_name'
+        - idrac_info.system_info.iDRAC[0].FirmwareVersion < idrac9_supported_version
+
+    - name: Firmware version of iDRAC8 not supported
+      debug:
+        msg: "{{ idrac8_firmware_not_supported_msg }}"
+      when:
+        - '"iDRAC8" in idrac_license_name'
+        - idrac_info.system_info.iDRAC[0].FirmwareVersion < idrac8_supported_version
+
+    - name: Check NFS share access
+      dellemc.openmanage.idrac_server_config_profile:
+        idrac_ip: "{{ inventory_hostname }}"
+        idrac_user: "{{ idrac_username }}"
+        idrac_password: "{{ idrac_password }}"
+        share_name: "{{ management_station_ip }}:{{ nfs_share_offline_repo }}"
+        command: "export"
+        scp_components: "BIOS"
+        scp_file: "{{ nfs_check_file }}"
+        export_format: XML
+        export_use: Default
+        job_wait: true
+      register: nfs_check
+      ignore_errors: true
+      until: not nfs_check.failed
+      retries: "{{ retries_count }}"
+
+    - name: Missing entries in nfs exports
+      fail:
+        msg: "{{ missing_exports_fail_msg }}"
+      when:
+        - nfs_check_key in nfs_check.msg or
+          nfs_check_key in nfs_check.scp_status.Status
+  when: not provision_status
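
Note: the firmware warnings above compare FirmwareVersion against the supported release with a plain '<', i.e. a string comparison. For dotted version strings Ansible also provides the 'version' test; a hedged alternative sketch (not what this commit uses):

- name: Warn when iDRAC9 firmware is older than the supported release
  debug:
    msg: "{{ idrac9_firmware_not_supported_msg }}"
  when:
    - '"iDRAC9" in idrac_license_name'
    - idrac_info.system_info.iDRAC[0].FirmwareVersion is version(idrac9_supported_version, '<')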

+ 24 - 22
control_plane/roles/provision_idrac/tasks/deploy_os.yml

@@ -13,24 +13,21 @@
 # limitations under the License.
 ---
 
-- name: Set one-time boot device to PXE
-  community.general.redfish_command:
-    category: Systems
-    command: SetOneTimeBoot
-    bootdevice: "Pxe"
-    baseuri: "{{ inventory_hostname }}"
-    username: "{{ idrac_username }}"
-    password: "{{ idrac_password }}"
-  when: not (enterprise_license or datacenter_license)
+- name: Configure boot order for PXE booting
+  dellemc.openmanage.idrac_bios:
+    idrac_ip: "{{ inventory_hostname }}"
+    idrac_user: "{{ idrac_username }}"
+    idrac_password: "{{ idrac_password }}"
+    attributes:
+      SetBootOrderEn: NIC.PxeDevice.1-1,NIC.PxeDevice.2-1,NIC.PxeDevice.3-1,NIC.PxeDevice.4-1
+      UefiBootSeq: NIC.PxeDevice.1-1,NIC.PxeDevice.2-1,NIC.PxeDevice.3-1,NIC.PxeDevice.4-1
+  register: deploy_os_pxe
+  when: provision_method == "pxe"
 
-- name: Reboot server
-  dellemc.openmanage.redfish_powerstate:
-    baseuri: "{{ inventory_hostname }}"
-    username: "{{ idrac_username }}"
-    password: "{{ idrac_password }}"
-    reset_type: ForceRestart
-  when: not (enterprise_license or datacenter_license)
-  register: deploy_os
+- name: Set deploy_os_status when provision_method == pxe
+  set_fact:
+    deploy_os_status: "{{ not deploy_os_pxe.failed }}"
+  when: provision_method == "pxe"
 
 - name: Install OS using iDRAC
   dellemc.openmanage.idrac_os_deployment:
@@ -40,12 +37,17 @@
     share_name: "{{ management_station_ip }}:{{ nfs_share_offline_repo }}"
     iso_image: "{{ unattended_iso_filename }}"
     expose_duration: "{{ expose_duration }}"
-  register: deploy_os
-  when: enterprise_license or datacenter_license
+  register: deploy_os_idrac
+  when: provision_method == "idrac"
+
+- name: Set deploy_os_status when provision_method == idrac
+  set_fact:
+    deploy_os_status: "{{ not deploy_os_idrac.failed }}"
+  when: provision_method == "idrac"
 
 - name: Add to provisioned_hosts to inventory
   command: >-
-    awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }} 
+    awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
     hosts create --name {{ inventory_hostname }} --inventory "{{ provisioned_idrac_inventory_name }}"
   register: update_inventory
   changed_when: true
@@ -53,9 +55,9 @@
   when:
     - awx_search_key in hostname.stdout
     - inventory_hostname not in fetch_inventory.stdout
-    - not deploy_os.failed
+    - deploy_os_status
 
 - name: Provision OS status
   debug:
     msg: "{{ provision_os_msg }}"
-  when: not deploy_os.failed
+  when: deploy_os_status

+ 22 - 13
control_plane/roles/provision_idrac/tasks/import_scp.yml

@@ -44,22 +44,29 @@
     path: "{{ role_path }}/files/{{ scp_filename }}"
     line: "{{ item }}"
     insertafter: '^(.*)SysProfile'
-    with_items:
-      - '  <Attribute Name="PxeDev1VlanPriority">0</Attribute>'
-      - '  <Attribute Name="PxeDev1Interface">NIC.Integrated.1-1-1</Attribute>'
-      - '  <Attribute Name="PxeDev1VlanId">1</Attribute>'
-      - '  <Attribute Name="PxeDev1VlanEnDis">Enabled</Attribute>'
-      - '  <Attribute Name="PxeDev1Protocol">IPv4</Attribute>'
-      - '  <Attribute Name="PxeDev1EnDis">Enabled</Attribute>'
-  when: not (enterprise_license or datacenter_license)
+  with_items:
+    - '  <Attribute Name="PxeDev1EnDis">Enabled</Attribute>'
+    - '  <Attribute Name="PxeDev2EnDis">Enabled</Attribute>'
+    - '  <Attribute Name="PxeDev3EnDis">Enabled</Attribute>'
+    - '  <Attribute Name="PxeDev4EnDis">Enabled</Attribute>'
+    - '  <Attribute Name="HttpDev1EnDis">Disabled</Attribute>'
+    - '  <Attribute Name="HttpDev2EnDis">Disabled</Attribute>'
+    - '  <Attribute Name="HttpDev3EnDis">Disabled</Attribute>'
+    - '  <Attribute Name="HttpDev4EnDis">Disabled</Attribute>'
+  when: provision_method == "pxe"
   run_once: true
 
 - name: Disable PXE attributes to SCP file
   lineinfile:
     path: "{{ role_path }}/files/{{ scp_filename }}"
-    line: '  <Attribute Name="PxeDev1EnDis">Disabled</Attribute>'
+    line: "{{ item }}"
     insertafter: '^(.*)SysProfile'
-  when: enterprise_license or datacenter_license
+  with_items:
+    - '  <Attribute Name="PxeDev1EnDis">Disabled</Attribute>'
+    - '  <Attribute Name="PxeDev2EnDis">Disabled</Attribute>'
+    - '  <Attribute Name="PxeDev3EnDis">Disabled</Attribute>'
+    - '  <Attribute Name="PxeDev4EnDis">Disabled</Attribute>'
+  when: provision_method == "idrac"
   run_once: true
 
 - name: Add SNMP community name attribute to SCP file
@@ -89,12 +96,14 @@
     command: "import"
     scp_file: "{{ scp_filename }}"
     scp_components: "ALL"
-    shutdown_type: "Graceful"
+    shutdown_type: "Forced"
     job_wait: "True"
   register: import_scp_status
-
+  until: not import_scp_status.failed
+  retries: "{{ retries_count }}"
+  
 - name: Remove the SCP file
   file:
     path: "{{ role_path }}/files/{{ scp_filename }}"
     state: absent
-  run_once: true
+  run_once: true

+ 6 - 1
control_plane/roles/provision_idrac/vars/main.yml

@@ -44,6 +44,11 @@ nfs_check_file: "nfs_check.xml"
 provisioned_idrac_inventory_name: "provisioned_idrac_inventory"
 awx_vars_filename: ".tower_cli.cfg"
 awx_vaultname: ".tower_vault_key"
+idrac9_supported_version: "4.40.40.00"
+idrac9_firmware_not_supported_msg: "[WARNING]Firmware version of iDRAC9 less than 4.40.40.00 is not supported for provisioning. Following tasks can be failed due to older firmware version. In case of failure, update firmware manually and re-run the idrac_template"
+idrac8_supported_version: "2.75.75.75"
+idrac8_firmware_not_supported_msg: "[WARNING]Firmware version of iDRAC8 less than 2.75.75.75 is not supported for provisioning. Following tasks can be failed due to older firmware version. In case of failure, update firmware manually and re-run the idrac_template"
+retries_count: 5
 
 # Usage: update_firmware.yml
 idrac_port: 443
@@ -60,4 +65,4 @@ raid_level: "RAID 0"
 # Usage: deploy_os.yml
 expose_duration: 60
 file_permission: 0644
-provision_os_msg: "OS provisioning is initiated. Wait for installation to complete for all servers."
+provision_os_msg: "OS provisioning is initiated. Wait for installation to complete for all servers."

+ 10 - 0
control_plane/test/temp_scp.xml

@@ -0,0 +1,10 @@
+<SystemConfiguration>
+<Component FQDD="BIOS.Setup.1-1">    
+<Attribute Name="BootMode">Uefi</Attribute>
+<Attribute Name="PxeDev1EnDis">Enabled</Attribute>
+<Attribute Name="SysProfile">PerfOptimized</Attribute>
+</Component>
+<Component FQDD="iDRAC.Embedded.1">
+<Attribute Name="SNMPAlert.1#State">Enabled</Attribute>
+</Component>
+</SystemConfiguration>

+ 709 - 0
control_plane/test/test_control_plane.yml

@@ -0,0 +1,709 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Testcase OMNIA_1.1_MS_TC_001
+# Test Case to validate the execution of control_plane.yml with valid inputs -- Default Test Case
+- name: OMNIA_1.1_MS_TC_001
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: VERIFY_OMNIA_01
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+             
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+          tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+          tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_002
+# Test Case to validate the execution of control_plane.yml with no input
+- name: OMNIA_1.1_MS_TC_002
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_002
+
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc02 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane_common role
+          include_role:
+            name: ../roles/control_plane_common
+          vars:
+            base_vars_filename: ../input_params/base_vars.yml
+      rescue:
+        - name: Validate error
+          assert:
+            that: input_base_failure_msg in ansible_failed_result.msg
+            success_msg: "{{ input_config_check_success_msg }}"
+            fail_msg: "{{ input_config_check_fail_msg }}"
+      tags: Execute_common_role
+    
+# Testcase OMNIA_1.1_MS_TC_003 and OMNIA_1.1_MS_TC_004
+# Test Case to validate the execution of control_plane.yml with NFS share already present
+- name: OMNIA_1.1_MS_TC_003
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_003,TC_004
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+    
+    - name: Creating new control_plane.yml
+      copy:
+        dest: "../test_control_plane.yml"
+        content: |
+         - name: Executing omnia roles
+           hosts: localhost
+           connection: local
+           roles:
+              - control_plane_common
+              - control_plane_repo
+        mode: '0644'
+      tags: Replace_control_plane
+      
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook test_control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+          tags: Execute_control_plane
+    
+    - block:
+        - name: Execute validation script
+          include_tasks: "{{ control_plane_validation_script_path }}" 
+          tags: Execute_Validation_Script
+      
+    - name: Delete newly created control_plane.yml
+      file:
+        state: absent
+        path: ../test_control_plane.yml
+      when: foo_stat.stat.exists
+      tags: Delete_test_control_plane
+
+# Testcase OMNIA_1.1_MS_TC_005
+# Test Case to validate the execution of control_plane.yml after a successful run and validate k8s pods along with services after reboot.
+- name: OMNIA_1.1_MS_TC_005
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_005
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+      
+    - name: Check uptime
+      command: uptime -p
+      register: system_uptime
+      changed_when: false
+      tags: Check_Uptime
+      
+    - name: Extracting data from system_uptime
+      set_fact:
+        uptime_number: "{{ system_uptime.stdout.split()[1] }}"
+        uptime_min: "{{ system_uptime.stdout.split()[2] }}"
+    
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      when: uptime_number|int > 15
+      tags: Replace_input
+      
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+          when: uptime_number|int > 15
+          tags: Execute_control_plane
+          
+        - name: Reboot system
+          command: reboot
+          when: uptime_number|int > 15
+          tags: Reboot_System
+    
+    - block:
+        - name: Wait for 200 seconds for kubectl to get things ready
+          pause:
+            seconds: 200
+          when: (uptime_number| int <= 15) and (uptime_min == "minutes")
+
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+          when: (uptime_number| int <= 15) and (uptime_min == "minutes")
+          tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_006 and OMNIA_1.1_MS_TC_007
+# Test Case to validate the execution of control_plane.yml when, after a successful run, the user deletes/stops all pods
+- name: OMNIA_1.1_MS_TC_006
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_006,TC_007
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: "0644"
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+      
+    - name: Delete all pods
+      command: kubectl delete pods --all --all-namespaces
+      changed_when: false
+      tags: Delete_Pods
+      
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+      
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_008
+# Test Case to validate the execution of control_plane.yml with infiniband=false, powervault=true and ethernet=true
+- name: OMNIA_1.1_MS_TC_008
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_008
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - name: Editing base_vars.yml for powervault to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ powervault_false }}"
+        replace: "{{ powervault_true }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for ethernet to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ ethernet_false }}"
+        replace: "{{ ethernet_true }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for infiniband to false
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ infiniband_true }}"
+        replace: "{{ infiniband_false }}"
+      tags: Edit_base_vars
+        
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_009
+# Test Case to validate the execution of control_plane.yml with infiniband=true, powervault=false and ethernet=true
+- name: OMNIA_1.1_MS_TC_009
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_009
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - name: Editing base_vars.yml for powervault to false
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ powervault_true }}"
+        replace: "{{ powervault_false }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for ethernet to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ ethernet_false }}"
+        replace: "{{ ethernet_true }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for infiniband to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ infiniband_false }}"
+        replace: "{{ infiniband_true }}"
+      tags: Edit_base_vars
+        
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_010
+# Test Case to validate the execution of control_plane.yml with infiniband=true, powervault=true and ethernet=false
+- name: OMNIA_1.1_MS_TC_010
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_010
+
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - name: Editing base_vars.yml for powervault to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ powervault_false }}"
+        replace: "{{ powervault_true }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for ethernet to false
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ ethernet_true }}"
+        replace: "{{ ethernet_false }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for infiniband to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ infiniband_false }}"
+        replace: "{{ infiniband_true }}"
+      tags: Edit_base_vars
+        
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_011
+# Test Case to validate the execution of control_plane.yml with firmware update set to False
+- name: OMNIA_1.1_MS_TC_011
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_011
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+      
+    - name: Set firmware update to false
+      replace:
+        path: ../input_params/idrac_vars.yml
+        regexp: "{{ fw_update_true }}"
+        replace: "{{ fw_update_false }}"
+      tags: Set_FW_Update
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+      
+    - name: Check if firmware updates folder exists
+      stat:
+        path: /var/nfs_repo/dellupdates
+      register: fw_update_dir
+      tags: Set_FW_Update
+      
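+    # /var/nfs_repo/dellupdates is the NFS share into which the Dell update catalog is
+    # downloaded when firmware updates are enabled (the same path referenced by
+    # test_idrac_validation.yml); with firmware update set to false, the check below
+    # expects it to be absent.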
+    - name: Verify firmware updates were not downloaded
+      assert:
+        that:
+          - not fw_update_dir.stat.exists
+        success_msg: "{{ fw_success_validation }}"
+        fail_msg: "{{ fw_fail_validation }}"
+      tags: Set_FW_Update
+        
+# Testcase OMNIA_1.1_MS_TC_012
+# Test Case to validate the execution of control_plane.yml with firmware update set to true
+- name: OMNIA_1.1_MS_TC_012
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_012
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+      
+    - name: Set firmware update to true
+      replace:
+        path: ../input_params/idrac_vars.yml
+        regexp: "{{ fw_update_false }}"
+        replace: "{{ fw_update_true }}"
+      tags: Set_FW_Update
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+      
+    - name: Check if firmware updates folder exists
+      stat:
+        path: /var/nfs_repo/dellupdates
+      register: fw_update_dir
+      tags: Set_FW_Update
+      
+    - name: Verify firmware updates were downloaded
+      assert:
+        that:
+          - fw_update_dir.stat.exists
+        success_msg: "{{ fw_success_validation }}"
+        fail_msg: "{{ fw_fail_validation }}"
+      tags: Set_FW_Update
+
+# Testcase OMNIA_1.1_MS_TC_013
+# Test Case to validate the execution of control_plane.yml with docker login credential
+- name: OMNIA_1.1_MS_TC_013
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml  
+   
+  gather_subset:
+    - 'min'
+  tags: TC_013
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+    
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+      
+    - name: Change docker username in omnia_config.yml
+      replace:
+        path: ../../omnia_config.yml
+        regexp: "docker_username: .*$"
+        replace: 'docker_username: "{{ docker_user }}"'
+      tags: Set_Docker_Creds
+    
+    - name: Assert if the credentials are valid in test_control_plane_vars.yml
+      assert:
+        that:
+          - 'docker_user != "User"'
+          - 'docker_password != "Password"'
+        success_msg: "{{ valid_docker_creds }}"
+        fail_msg: "{{ invalid_docker_creds }}"
+      tags: Set_Docker_Creds
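+    # "User" and "Password" are assumed to be the placeholder defaults shipped in
+    # test_vars/test_control_plane_vars.yml; the assert above fails fast when real
+    # Docker credentials have not been filled in.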
+    
+    - name: Change docker password in omnia_config.yml
+      replace:
+        path: ../../omnia_config.yml
+        regexp: "docker_password: .*$"
+        replace: 'docker_password: "{{ docker_password }}"'
+      tags: Set_Docker_Creds
+    
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+          tags: Execute_control_plane
+    
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+          tags: Execute_Validation_Script
+      
+    - name: Fetch docker info
+      shell: docker login & sleep 3
+      register: new
+      changed_when: false
+      tags: Set_Docker_Creds
+
+    - name: Assert that docker was used to pull images 
+      assert:
+        that:
+          - "'Login did not succeed' in new.stderr"
+        success_msg: "{{ docker_success_validation }}"
+        fail_msg: "{{ docker_fail_validation }}"
+      tags: Set_Docker_Creds

+ 271 - 0
control_plane/test/test_control_plane_validation.yml

@@ -0,0 +1,271 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---  
+
+- block:
+
+    - name: Fetch Package info
+      package_facts:
+        manager: auto
+      
+    - name: Verify all packages are installed
+      assert:
+        that: "'{{ item }}' in ansible_facts.packages"
+        success_msg: "{{ install_package_success_msg }}"
+        fail_msg: "{{ install_package_fail_msg }}"
+      when: "'python-docker' not in item"
+      with_items: "{{ common_packages }}"
+      ignore_errors: true
+      
+    - name: Check login_vars is encrypted
+      command: cat {{ login_vars_filename }}
+      changed_when: false
+      register: config_content
+       
+    - name: Validate that login file is encrypted
+      assert:
+        that: "'$ANSIBLE_VAULT;' in config_content.stdout"
+        fail_msg: "{{ login_vars_fail_msg }}"
+        success_msg: "{{ login_vars_success_msg }}"
+            
+#  Installing a required package : JQ      
+    - name: Installing jq (JSON Query)
+      package:
+        name: "{{ test_package }}"
+        state: present
+           
+#  Checking if all the required pods are working
+    - name: Get pods info
+      shell: kubectl get pods --all-namespaces
+      register: all_pods_info
+          
+    - name: Check the count of pods
+      set_fact:
+         count: "{{ all_pods_info.stdout_lines|length - 1 }}"
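+    # The first line of `kubectl get pods` output is the column header, so it is
+    # excluded from the pod count above.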
+          
+    - name: Check if all the pods are running
+      assert:
+        that:
+          - "'Running' in all_pods_info.stdout_lines[{{ item }}]"
+        fail_msg: "{{ check_pods_fail_msg }}"
+        success_msg: "{{ check_pods_success_msg }}"
+      with_sequence: start=1 end={{ count }}
+      
+#  Checking if NFS Server is running and Custom ISO is created
+    - name: Get NFS Stat
+      shell: systemctl status nfs-idmapd
+      register: nfstat_info
+       
+    - name: Verify NFS Stat is running
+      assert:
+        that:
+          - "'Active: active (running)' in nfstat_info.stdout"
+        success_msg: "{{ nfs_share_success_msg }}"
+        fail_msg: "{{ nfs_share_fail_msg }}"
+        
+    - name: Check nfs mount point
+      stat:
+        path: "{{ nfs_mount_Path }}"
+      register: nfs_mount_info
+          
+    - name: Verify nfs share is mounted
+      assert:
+        that:
+          - "{{ nfs_mount_info.stat.exists }}"
+        success_msg: "{{ nfs_mount_success_msg }}"
+        fail_msg: "{{ nfs_mount_fail_msg }}"
+           
+    - name: Check Custom ISO
+      stat:
+        path: "{{ check_iso_path }}"
+      register: check_iso_info
+          
+    - name: Verify Custom ISO is created in the NFS repo
+      assert:
+        that:
+          - "{{ check_iso_info.stat.exists }}"
+        success_msg: "{{ check_iso_success_msg }}"
+        fail_msg: "{{ check_iso_fail_msg }}"
+      
+#  Checking if network-config container is running
+    
+    - name: Get Pod info for network-config
+      shell: |
+         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "network-config" and .labels."io.kubernetes.container.name" == "mngmnt-network-container") | "\(.id) \(.metadata.name) \(.state)"'
+      register: network_config_pod_info
+          
+    - name: Get Pod Status for network-config
+      assert:
+        that:
+          - network_config_pod_info.stdout_lines | regex_search( "{{ container_info }}")
+        success_msg: "{{ network_config_pod_success_msg }}"
+        fail_msg: "{{ network_config_pod_fail_msg }}"
+         
+    - name: Get Pod facts
+      shell: |
+            crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "network-config" and .labels."io.kubernetes.container.name" == "mngmnt-network-container") | "\(.id)"'
+      register: network_config_pod_fact
+         
+    - name: Parse container id for the pods
+      set_fact: 
+        container_id: "{{ network_config_pod_fact.stdout[1:-1] }}"   
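+    # jq emits the container id as a quoted JSON string, so the [1:-1] slice above
+    # strips the surrounding double quotes before the id is passed to crictl exec.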
+          
+    - name: Check dhcpd, xinetd services are running
+      command: crictl exec {{ container_id }} systemctl is-active {{ item }}
+      changed_when: false
+      ignore_errors: yes
+      register: pod_service_check
+      with_items:
+        - dhcpd
+        - xinetd
+            
+    - name: Verify dhcpd, xinetd services are running
+      assert:
+        that:
+          - "'active' in pod_service_check.results[{{ item }}].stdout"
+          - "'inactive' not in pod_service_check.results[{{ item }}].stdout"
+          - "'unknown' not in pod_service_check.results[{{ item }}].stdout"
+        fail_msg: "{{ pod_service_check_fail_msg }}"
+        success_msg: "{{ pod_service_check_success_msg }}"
+      with_sequence: start=0 end={{ pod_service_check.results|length - 1 }}
+         
+# Checking if cobbler-container is running
+    - name: Get Pod info for cobbler
+      shell: |
+         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "cobbler") | "\(.id) \(.metadata.name) \(.state)"'
+      register: network_config_pod_info
+      
+    - name: Get Pod Status for cobbler
+      assert:
+        that:
+          - network_config_pod_info.stdout_lines | regex_search( "{{ container_info }}")
+        success_msg: "{{ cobbler_pod_success_msg }}"
+        fail_msg: "{{ cobbler_pod_fail_msg }}"
+      
+    - name: Get Pod facts for cobbler
+      shell: |
+            crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "cobbler") | "\(.id)"'
+      register: network_config_pod_fact
+      
+    - name: Extract cobbler pod id
+      set_fact: 
+        cobbler_id: "{{ network_config_pod_fact.stdout[1:-1] }}"   
+      
+    - name: Check tftp, dhcpd, xinetd, cobblerd services are running
+      command: crictl exec {{ cobbler_id }} systemctl is-active {{ item }}
+      changed_when: false
+      ignore_errors: yes
+      register: pod_service_check
+      with_items:
+        - dhcpd
+        - tftp
+        - xinetd
+        - cobblerd
+        
+    - name: Verify tftp, dhcpd, xinetd, cobblerd services are running
+      assert:
+        that:
+          - "'active' in pod_service_check.results[{{ item }}].stdout"
+          - "'inactive' not in pod_service_check.results[{{ item }}].stdout"
+          - "'unknown' not in pod_service_check.results[{{ item }}].stdout"
+        fail_msg: "{{ pod_service_check_fail_msg }}"
+        success_msg: "{{ pod_service_check_success_msg }}"
+      with_sequence: start=0 end=3
+
+# Checking Cron-Jobs
+    - name: Check crontab list
+      command: crictl exec {{ cobbler_id }} crontab -l
+      changed_when: false
+      register: crontab_list
+
+    - name: Verify crontab list
+      assert:
+        that:
+          - "'* * * * * /usr/bin/ansible-playbook /root/tftp.yml' in crontab_list.stdout"
+          - "'*/5 * * * * /usr/bin/ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"
+        fail_msg: "{{cron_jobs_fail_msg}}"
+        success_msg: "{{cron_jobs_success_msg}}"
+
+#  Checking subnet-manager pod is running and OpenSM is running
+#  Comment these tasks out if infiniband is not connected
+    - name: Fetch subnet-manager stats
+      shell: kubectl get pods -n subnet-manager 
+      register: sm_manager_info
+
+    - name: Verify subnet_manager container is running
+      assert:
+        that:
+          - "'Running' in sm_manager_info.stdout_lines[1]"
+        fail_msg: "{{subnet_manager_fail_msg}}"
+        success_msg: "{{subnet_manager_success_msg}}"
+
+# Checking awx pod is running
+
+    - name: Get Pod info for awx
+      shell: |
+         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "awx") | "\(.id) \(.metadata.name) \(.state)"'
+      register: awx_config_pod_info
+           
+    - name: Get Pod Status for awx
+      assert:
+        that:
+          - awx_config_pod_info.stdout_lines[{{ item }}] | regex_search( "{{ container_info }}")
+        success_msg: "{{ awx_pod_success_msg }}"
+        fail_msg: "{{ awx_pod_fail_msg }}"
+      ignore_errors: yes
+      with_sequence: start=0 end={{ awx_config_pod_info.stdout_lines |length - 1 }}
+          
+    - name: Get pvc stats
+      shell: |
+          kubectl get pvc -n awx -o json |jq '.items[] | "\(.status.phase)"'
+      register: pvc_stats_info
+            
+    - name: Verify pvc status is Bound
+      assert:
+        that:
+          - "'Bound' in pvc_stats_info.stdout"
+        fail_msg: "{{ pvc_stat_fail_msg }}"
+        success_msg: "{{ pvc_stat_success_msg }}"
+      with_sequence: start=0 end={{ pvc_stats_info.stdout_lines |length|int - 1 }}
+            
+    - name: Get svc stats
+      shell: kubectl get svc -n awx awx-service -o json
+      register: svc_stats_info
+           
+    - name: Verify if svc is up and running
+      assert:
+        that:
+          - "'Error from server (NotFound):' not in svc_stats_info.stdout"
+        success_msg: "{{ svc_stat_success_msg }}"
+        fail_msg: "{{ svc_stat_fail_msg }}"
+             
+    - name: Fetch Cluster IP from svc
+      shell: |
+          kubectl get svc -n awx -o json | jq '.items[] | select(.metadata.name == "awx-service") | "\(.spec.clusterIP)"'
+      register: cluster_ip_info
+           
+    - name: Check if connection to svc Cluster IP is enabled
+      uri:
+        url: http://{{ cluster_ip_info.stdout[1:-1] }}
+        follow_redirects: none
+        method: GET
+      ignore_errors: yes
+      register: cluster_ip_conn
+           
+    - name: Verify connection to svc cluster is working
+      assert:
+        that:
+          - cluster_ip_conn.status == 200
+        success_msg: "{{ svc_conn_success_msg }} : {{ cluster_ip_info.stdout[1:-1] }}"
+        fail_msg: "{{ svc_conn_fail_msg }} : {{ cluster_ip_info.stdout[1:-1] }}"

+ 47 - 0
control_plane/test/test_eth_mtu.yml

@@ -0,0 +1,47 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+- name: Get running config and reload PS
+  hosts: ethernet
+  connection: network_cli
+  gather_facts: no
+  collections:
+   - dellemc.os10
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+  tasks:
+   - name: Set facts
+     set_fact:
+       ansible_ssh_user: "{{ username }}"
+       ansible_ssh_pass: "{{ password }}"
+     tags: mtu,reload
+
+   - name: View running configurations
+     dellos10_command:
+       commands: show interface ethernet {{ validation_port }}
+     register: var1
+     tags: mtu
+
+   - name: Print config
+     debug:
+       msg: "{{ var1 }}"
+     tags: mtu
+
+   - name: Reload switch
+     dellos10_command:
+       commands: 
+          - command: 'reload'
+            prompt: '\[confirm yes/no\]:?$'
+            answer: 'yes'
+     tags: reload

+ 346 - 0
control_plane/test/test_ethernet_config.yml

@@ -0,0 +1,346 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+---
+# Testcase OMNIA_1.1_EF_TC_007
+# Execute ethernet.yml with both valid Global and interface configs in ethernet_config.yml
+- name: OMNIA_1.1_EF_TC_007
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_007
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Back up of ethernet_config.yml
+      copy:
+        src: "{{ ethernet_config_dir }}"
+        dest: "{{ ethernet_config_backup_dir }}"
+        mode: "{{ file_perm }}"
+      tags: TC_007
+
+    - name: Executing ethernet role with default ethernet_config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"      
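+    # awx_script_path is expected to point at test_prepare.yml (added in this commit),
+    # which creates the AWX inventory, host and job template from the vars above,
+    # launches the job, registers the outcome in job_status, and cleans up when
+    # delete_status is true.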
+
+    - block:
+       - name: Validate default flow
+         assert:
+           that:
+             - ethernet_success_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+    - name: Set MTU of port {{ port_num }}
+      lineinfile:
+       dest: "{{ ethernet_config_dir }}"
+       insertbefore: "{{ search_line }}"
+       line: "{{ add_mtu_line }}"
+    
+    - name: Executing ethernet role with default ethernet_config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      
+    - name: Getting MTU of ethernet {{ validation_port }}
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'mtu'
+      changed_when: false
+      register: mtu_out
+      tags: TC_007,TC_002
+       
+    - name: Validate role exec output pre and post MTU addition
+      assert:
+        that:          
+          - validate_mtu_line in mtu_out.stdout
+        success_msg: "{{ success_message }}"
+        fail_msg: "{{ fail_case }}"
+      changed_when: false
+      failed_when: false
+      tags: TC_007
+
+# Testcase OMNIA_1.1_EF_TC_005
+# Execute ethernet.yml with save_config set to False
+- name: OMNIA_1.1_EF_TC_005
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_005
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Reload switch
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'reload'
+      changed_when: false
+    
+    - name: Pausing for switch to come up
+      pause:
+        minutes: "{{ time_to_pause }}"
+        
+    - name: Getting MTU of ethernet {{ validation_port }}
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'mtu'
+      changed_when: false
+      register: mtu_out
+      
+    - block:
+       - name: Validate that MTU is changed
+         assert:
+           that:
+             - validate_mtu_line not in mtu_out.stdout
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+         failed_when: false
+                
+# Testcase OMNIA_1.1_EF_TC_006
+# Execute ethernet.yml with save_config set to True
+- name: OMNIA_1.1_EF_TC_006
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_006
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Set save_changes_to_startup to True in ethernet_vars
+      ansible.builtin.replace:
+        dest: "{{ ethernet_config_dir }}"
+        regexp: 'save_changes_to_startup: false'
+        replace: 'save_changes_to_startup: True'
+        
+    - name: Execute network_ethernet role as port 4 has mtu set in ethernet_vars
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      
+    - name: Reload switch
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'reload'
+      changed_when: false
+    
+    - name: Pausing for switch to come up
+      pause:
+        minutes: "{{ time_to_pause }}"
+        
+    - name: Getting MTU of ethernet {{ validation_port }}
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'mtu'
+      changed_when: false
+      register: mtu_out
+    
+    - block:
+       - name: Validate that MTU is changed
+         assert:
+           that:
+             - validate_mtu_line in mtu_out.stdout
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+# Testcase OMNIA_1.1_EF_TC_010
+# Execute ethernet.yml with invalid Global and correct interface configs in ethernet_config.yml
+- name: OMNIA_1.1_EF_TC_010
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_010
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Making interface config invalid
+      lineinfile:
+        path: "{{ ethernet_config_dir }}"
+        insertafter: 'os10_config:'
+        line: 'gibberish inserted'
+      tags: TC_007
+
+    - name: Executing ethernet role with invalid global config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+
+    - block:
+       - name: Validate role exec output
+         assert:
+           that:
+             - ethernet_fail_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+
+# Testcase OMNIA_1.1_EF_TC_009
+# Validation of ethernet default configuration
+- name: OMNIA_1.1_EF_TC_009
+  hosts: ethernet
+  gather_facts: false
+  tags: VERIFY_OMNIA_01
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml    
+  tasks:
+    - name: Executing ethernet role
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}" 
+    
+    - block:
+       - name: Validate default flow
+         assert:
+           that:
+             - ethernet_success_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+         
+# Testcase OMNIA_1.1_EF_TC_011
+# Execute ethernet.yml with valid Global and incorrect interface configs in ethernet_config.yml
+- name: OMNIA_1.1_EF_TC_011
+  hosts: ethernet
+  gather_facts: false
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Making interface config invalid
+      lineinfile:
+        path: "{{ ethernet_config_dir }}"
+        insertafter: 'os10_interface:'
+        line: 'gibberish inserted'
+        
+    - name: Executing ethernet role with invalid interface config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+
+    - block:
+       - name: Validate role exec output
+         assert:
+           that:
+             - ethernet_fail_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"      
+
+
+# Testcase OMNIA_1.1_EF_TC_008
+# Execute ethernet.yml with only Global and no interface configs in ethernet_config.yml 
+- name: OMNIA_1.1_EF_TC_008
+  hosts: ethernet
+  gather_facts: false
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Retrieving ethernet_config backup
+      copy:
+        src: "{{ ethernet_config_backup_dir }}"
+        dest: "{{ ethernet_config_dir }}"
+        mode: "{{ file_perm }}"
+      tags: TC_008
+    
+    - name: Removing interface config from ethernet_config
+      ansible.builtin.command: sed -i '22,117d' "{{ ethernet_config_dir }}"
+      args:
+       warn: no
+      changed_when: false
+      tags: TC_008
+      
+    - name: Executing ethernet role with no interface config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      
+    - block:
+       - name: Validate default flow
+         assert:
+           that:
+             - ethernet_success_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+    - name: Restoring original ethernet_config
+      copy:
+        src: "{{ ethernet_config_backup_dir }}"
+        dest: "{{ ethernet_config_dir }}"
+        mode: "{{ file_perm }}"
+      tags: TC_008
+      
+    - name: Set save_changes_to_startup to True in ethernet_vars
+      ansible.builtin.replace:
+        dest: "{{ ethernet_config_dir }}"
+        regexp: 'save_changes_to_startup: false'
+        replace: 'save_changes_to_startup: True'
+        
+    - name: Execute network_ethernet role as port 4 has mtu set in ethernet_vars
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      
+    - name: Set save_changes_to_startup to False in ethernet_vars
+      ansible.builtin.replace:
+        dest: "{{ ethernet_config_dir }}"
+        regexp: 'save_changes_to_startup: True'
+        replace: 'save_changes_to_startup: False'

+ 157 - 0
control_plane/test/test_ethernet_fact.yml

@@ -0,0 +1,157 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+---
+# Testcase OMNIA_1.1_EF_TC_002
+# Execute ethernetfacts.yml with valid IP with valid credentials in ethernet inventory group
+- name: OMNIA_1.1_EF_TC_002
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_002
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Execute ethernet_facts with valid creds and valid IP
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ fact_template_value }}"
+       job_template_name: "{{ fact_job_name }}"
+       playbook_path: "{{ eth_facts_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      tags: TC_002
+   
+    - block:
+       - name: Validate default flow with valid IP and valid credentials
+         assert:
+           that:
+             - "'successful' in job_status.status"
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+# Testcase OMNIA_1.1_EF_TC_003
+# Execute ethernetfacts.yml with Invalid IP in ethernet inventory group
+- name: OMNIA_1.1_EF_TC_003
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_003
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Setting IP
+      set_fact:
+        eth_host_name: "{{ random_ip }}"
+         
+    - name: Execute ethernet_facts with random IP
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ fact_template_value }}"
+       job_template_name: "{{ fact_job_name }}"
+       playbook_path: "{{ eth_facts_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+
+    - block:
+        - name: Validate invalid IP and valid credentials
+          assert:
+            that:
+              - "'failed' in job_status.status"
+            success_msg: "{{ success_message }}"
+            fail_msg: "{{ fail_case }}"
+          changed_when: false
+
+# Testcase OMNIA_1.1_EF_TC_001
+# Execute ethernetfacts.yml with no hosts in ethernet inventory group
+- name: OMNIA_1.1_EF_TC_001
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_001
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Execute ethernet_facts with no host details
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"       
+       template_name: "{{ fact_template_value }}"
+       job_template_name: "{{ fact_job_name }}"
+       playbook_path: "{{ eth_facts_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+    
+    - block:
+       - name: Validate no hosts and valid credentials
+         assert:
+           that:
+             - "'successful' in job_status.status"
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+# Testcase OMNIA_1.1_EF_TC_004
+# Execute ethernetfacts.yml with valid IP in ethernet inventory group with incorrect credentials
+- name: OMNIA_1.1_EF_TC_004
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_004
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Making ethernet_credentials invalid
+      tower_credential:
+        name: "ethernet_credential"
+        credential_type: "Machine"
+        inputs:
+          username: "{{ invalid_username }}"
+       
+    - name: Execute ethernet_facts with invalid credentials
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ fact_template_value }}"
+       job_template_name: "{{ fact_job_name }}"
+       playbook_path: "{{ eth_facts_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+     
+    - block:
+       - name: Validate valid IP and invalid credentials
+         assert:
+           that:
+             - "'failed' in job_status.status"
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+         
+    - name: Set credentials back to default
+      tower_credential:
+        name: "ethernet_credential"
+        credential_type: "Machine"
+        inputs:
+          username: "{{ username }}"
+          password: "{{ password }}"

+ 5 - 0
control_plane/test/test_ethernet_inventory

@@ -0,0 +1,5 @@
+[ethernet]
+1.2.3.4
+
+[ethernet:vars]
+ansible_network_os= dellemc.os10.os10

File diffs are limited because there are too many
+ 1395 - 0
control_plane/test/test_idrac.yml


+ 2 - 0
control_plane/test/test_idrac_inventory

@@ -0,0 +1,2 @@
+[all]
+10.10.10.10

+ 197 - 0
control_plane/test/test_idrac_validation.yml

@@ -0,0 +1,197 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---  
+- block: 
+
+   - name: Include idrac vars
+     include_vars: "../input_params/idrac_vars.yml"
+  
+   - name: Export SCP configuration to local file
+     idrac_server_config_profile:
+      idrac_ip: "{{ inventory_hostname }}"
+      idrac_user: "{{ idrac_username }}"
+      idrac_password: "{{ idrac_password }}"
+      share_name: "{{ playbook_dir }}"
+      command: "export"
+      scp_file: "{{ inventory_hostname }}_scp.xml"
+      job_wait: true
+     run_once: true
+         
+   - name: Fetch the SCP configuration
+     command: cat "{{ inventory_hostname }}_scp.xml"
+     register: config_file
+           
+   - name: Testcase to verify the boot mode
+     assert:
+      that: 
+        - config_file.stdout | regex_search( bootmode_regex1 )
+      fail_msg: " {{ bootmode_check_fail_msg }}"
+      success_msg: "{{ bootmode_check_success_msg }}"
+     ignore_errors: true
+         
+   - name: Testcase to verify the SNMP configuration
+     assert:
+      that: 
+       - config_file.stdout | regex_search(snmp_regex1)
+      fail_msg: " {{ snmp_check_fail_msg }}"
+      success_msg: "{{ snmp_check_success_msg }}"
+     ignore_errors: true
+     
+   - name: Testcase to verify the system profile value Performance
+     assert:
+      that: 
+       - config_file.stdout | regex_search(sysprofile_regex1)
+      fail_msg: " {{ sysprofile_check_fail_msg }}"
+      success_msg: "{{ sysprofile_check_success_msg }}"
+     ignore_errors: true
+     when: idrac_system_profile == sysprofile_value1
+          
+   - name: Testcase to verify the system profile value PerformancePerWatt(DAPC)
+     assert:
+      that: 
+       - config_file.stdout | regex_search(sysprofile_regex2)
+      fail_msg: " {{ sysprofile_check_fail_msg }}"
+      success_msg: "{{ sysprofile_check_success_msg }}"
+     ignore_errors: true
+     when: idrac_system_profile == sysprofile_value2
+
+   - name: Testcase to verify the system profile value WorkstationPerformance
+     assert:
+      that: 
+       - config_file.stdout | regex_search(sysprofile_regex3)
+      fail_msg: " {{ sysprofile_check_fail_msg }}"
+      success_msg: "{{ sysprofile_check_success_msg }}"
+     ignore_errors: true
+     when: idrac_system_profile == sysprofile_value3
+
+   - name: Testcase to verify the system profile value PerformancePerWatt(OS)
+     assert:
+      that: 
+       - config_file.stdout | regex_search(sysprofile_regex4)
+      fail_msg: " {{ sysprofile_check_fail_msg }}"
+      success_msg: "{{ sysprofile_check_success_msg }}"
+     ignore_errors: true
+     when: idrac_system_profile == sysprofile_value4  
+
+   - name: Testcase to verify the pxe device status
+     assert:
+      that: 
+       - config_file.stdout | regex_search(pxedevice)
+      fail_msg: " {{ pxedevice_check_fail_msg }}"
+      success_msg: "{{ pxedevice_check_success_msg }}"
+     ignore_errors: true
+     when: not (enterprise_license or datacenter_license)
+     
+   - name: Initialize RAID status
+     set_fact:
+       raid_type: false
+               
+   - name: Get iDRAC info details
+     idrac_system_info:
+      idrac_ip: "{{ inventory_hostname }}"
+      idrac_user: "{{ idrac_username }}"
+      idrac_password: "{{ idrac_password }}"
+     register: idrac_info 
+
+   - name: Set RAID status
+     set_fact:
+       raid_type: true
+     with_items: "{{ idrac_info.system_info.Controller }}"
+     loop_control:
+       index_var: my_idx3
+     when: '"RAID" in idrac_info.system_info.ControllerSensor[my_idx3].FQDD'
+                
+   - name: Testcase to verify virtual disk creation status
+     assert:
+      that:
+       - " 'omnia_vd' in idrac_info.system_info.VirtualDisk[0].Name "
+      fail_msg: "{{ vd_fail_msg }}"
+      success_msg: " {{ vd_success_msg }}"
+     ignore_errors: true
+     when: raid_type
+
+   - name: Execute get pods command
+     command: "kubectl get pods -n {{ awx_namespace }}"
+     changed_when: true
+     register: k8s_pods
+     ignore_errors: true
+     run_once: true
+     
+   - name: Get awx pod 
+     set_fact:
+      awx_pods: "{{ item | regex_search(awx_pod_regex) | trim  }}"
+     with_items: 
+       - "{{ k8s_pods.stdout_lines }}"
+     run_once: true
+     ignore_errors: true
+     when: item | regex_search(awx_pod_item_regex)
+
+   - name: Get awx cluster ip
+     shell: "kubectl get svc awx-ui -n {{ awx_namespace }} -o jsonpath='{.spec.clusterIP}'"
+     register: awx_cluster_ip
+     changed_when: false
+     ignore_errors: true
+
+   - name: Get AWX admin password
+     shell: "kubectl get secret awx-admin-password -n {{ awx_namespace }} -o jsonpath='{.data.password}' | base64 --decode"
+     register: awx_admin_password
+     changed_when: false
+     ignore_errors: true
+          
+   - name: Execute awx get inventory hosts command
+     shell: "awx --conf.host http://{{ awx_cluster_ip.stdout }}:8052 --conf.username admin --conf.password {{ awx_admin_password.stdout }} --conf.insecure hosts list --inventory {{ provisioned_inventory_name }} -f human --filter 'name'"
+     changed_when: true
+     register: idrac_hosts
+     run_once: true
+     ignore_errors: true
+               
+   - name: Testcase to verify IP added to provisioned_idrac_inventory
+     assert:
+      that: 
+        - idrac_hosts.stdout | regex_search( inventory_hostname )
+      fail_msg: "{{ provisioned_ip_fail_msg }}"
+      success_msg: "{{ provisioned_ip_success_msg }}"
+     ignore_errors: true 
+          
+   - name: Update firmware
+     idrac_firmware:
+      idrac_ip: "{{ inventory_hostname }}"
+      idrac_user: "{{ idrac_username }}"
+      idrac_password: "{{ idrac_password }}"
+      share_name: "/var/nfs_repo/dellupdates"
+      reboot: false
+      job_wait: true
+      apply_update: False
+      catalog_file_name: "Catalog.xml"
+     register: idrac_firmware
+     when: firmware_update_required
+
+   - name: Checking firmware update status
+     assert:
+      that:
+        - idrac_firmware.msg in firmware_status
+      fail_msg: "{{ firmware_fail_msg }}"
+      success_msg: "{{ firmware_success_msg }}"
+     ignore_errors: true
+     when: firmware_update_required  
+     
+   - name: Remove the scp file
+     file:
+       path: "{{ inventory_hostname }}_scp.xml"
+       state: absent
+     run_once: true             
+      
+  rescue:
+    - debug:
+       msg: "{{ failed_msg }}"

File diffs are limited because there are too many
+ 1220 - 0
control_plane/test/test_powervault.yml


+ 135 - 0
control_plane/test/test_prepare.yml

@@ -0,0 +1,135 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+- block:
+
+    - name: Get awx-service Cluster-IP
+      command: "kubectl get svc awx-service -n {{ awx_namespace }} -o jsonpath='{.spec.clusterIP}'"
+      register: awx_cluster_ip
+      changed_when: false
+
+    - name: Get AWX admin password
+      shell: "kubectl get secret awx-admin-password -n {{ awx_namespace }} -o jsonpath='{.data.password}' | base64 --decode"
+      register: awx_admin_password
+      changed_when: false
+
+    - name: Set IP and password
+      set_fact:
+        awx_ip: 'http://{{ awx_cluster_ip.stdout }}'
+        admin_password: "{{ awx_admin_password.stdout }}"
+      no_log: true
+
+    - name: Check if {{ tower_config_file_path }} file is encrypted
+      command: cat {{ tower_config_file_path }}
+      changed_when: false
+      no_log: true
+      register: config_content
+      run_once: true
+
+    - name: Decrypt {{ tower_config_file_path }}
+      command: >-
+        ansible-vault decrypt {{ tower_config_file_path }}
+        --vault-password-file {{ tower_vault_file_path }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      no_log: true
+      changed_when: false
+      run_once: true
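+    # The tower CLI config is decrypted only for the duration of this block and is
+    # re-encrypted by the "Encrypt {{ tower_config_file_path }}" task near the end.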
+
+    - name: Change tower_config file permissions
+      file:
+        path: "{{ tower_config_file_path }}"
+        mode: "{{ file_perm }}"
+
+    - name: Create an inventory
+      tower_inventory:
+        name: "{{ inventory_name }}"
+        organization: "{{ organization_name }}"
+        tower_config_file: "{{ tower_config_file_path }}"
+        state: present
+
+    - name: Create a host with inventory_hostname
+      tower_host:
+        name: "{{ inventory_hostname }}"
+        inventory: "{{ inventory_name }}"
+        state: present
+        tower_config_file: "{{ tower_config_file_path }}"
+      when: inventory_hostname is defined
+
+    - name: Create a job template
+      awx.awx.tower_job_template:
+        name: "{{ job_template_name }}"
+        job_type: "run"
+        organization: "{{ organization_name }}"
+        inventory: "{{ inventory_name }}"
+        project: "{{ project_name }}"
+        playbook: "{{ playbook_path }}"
+        credentials:
+          - "{{ item.credential }}"
+        state: present
+        tower_config_file: "{{ tower_config_file_path }}"
+      loop: "{{ job_template_details }}"
+      when: item.name == template_name
+
+    - name: Change file permissions
+      file:
+        path: "../input_params/login_vars.yml"
+        owner: root
+        mode: "{{ file_perm }}"
+
+    - name: Launch a job
+      awx.awx.tower_job_launch:
+        job_template: "{{ job_template_name }}"
+        wait: yes
+        tower_config_file: "{{ tower_config_file_path }}"
+      failed_when: false
+      register: job_status
+
+    - name: Delete host created with inventory_hostname
+      tower_host:
+        name: "{{ inventory_hostname }}"
+        inventory: "{{ inventory_name }}"
+        state: absent
+        tower_config_file: "{{ tower_config_file_path }}"
+      when: inventory_hostname is defined
+
+    - name: Delete an inventory
+      tower_inventory:
+        name: "{{ inventory_name }}"
+        organization: "{{ organization_name }}"
+        state: absent
+        tower_config_file: "{{ tower_config_file_path }}"
+      when: delete_status
+
+    - name: Delete a job template
+      awx.awx.tower_job_template:
+        name: "{{ job_template_name }}"
+        state: absent
+        tower_config_file: "{{ tower_config_file_path }}"
+      when: delete_status
+
+    - name: Encrypt {{ tower_config_file_path }}
+      command: >-
+        ansible-vault encrypt {{ tower_config_file_path }}
+        --vault-password-file {{ tower_vault_file_path }}
+      changed_when: false
+
+    - name: Change file permissions
+      file:
+        path: "{{ tower_config_file_path }}"
+        owner: root
+        mode: "{{ file_perm }}"
+
+  rescue:
+    - debug:
+        msg: "{{ failed_msg }}"

+ 2 - 0
control_plane/test/test_pv_inventory

@@ -0,0 +1,2 @@
+[powervault]
+192.26.0.1

+ 150 - 0
control_plane/test/test_vars/base_vars.yml

@@ -0,0 +1,150 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Path to directory hosting ansible config file (ansible.cfg file)
+# Default value is /etc/ansible
+# This directory is on the host running ansible, if ansible is installed using dnf
+# If ansible is installed using pip, this path should be set
+ansible_conf_file_path: /etc/ansible
+
+# This variable is used to enable ethernet switch configuration
+# It accepts boolean values "true" or "false". 
+# By default its value is "false".
+# If ethernet switch support is needed set this to "true"
+ethernet_switch_support: true
+
+# This variable is used to enable infiniband switch configuration
+# It accepts boolean values "true" or "false". 
+# By default its value is "false".
+# If infiniband configuration is needed set this to "true"
+ib_switch_support: true
+
+# This variable is used to enable powervault configuration
+# It accepts boolean values "true" or "false". 
+# By default its value is "false".
+# If powervault configuration is needed set this to "true"
+powervault_support: false
+
+# The nic/ethernet card that will be connected to the public internet.
+# Default value of nic is eno2
+public_nic: "eno2"
+
+# Kubernetes pod network CIDR for appliance k8s network
+# Make sure this value does not overlap with any of the host networks.
+# Default value is "192.168.0.0/16"
+appliance_k8s_pod_net_cidr: "192.168.0.0/16"
+
+### Usage: provision_idrac, network_ib, network_ethernet, powervault_me4 ###
+
+# The trap destination IP address is the IP address of the SNMP Server where the trap will be sent
+# If this variable is left blank, it means SNMP will be disabled
+# Provide a valid SNMP server IP
+snmp_trap_destination: ""
+
+# Provide the snmp community name needed
+# By default this is set to "public"
+snmp_community_name: "public"
+
+### Usage: webui_awx ###
+
+# Organization name that is created in AWX.
+# The default value is “DellEMC”
+awx_organization: "DellEMC"
+
+### Usage: provision_cobbler, provision_idrac ###
+
+# This variable is used to set node provisioning method
+# It accepts values: idrac, pxe
+# Default value is "idrac"
+# If provisioning needs to be done through cobbler, set it to "pxe"
+# If idrac license is not present, provisioning mode will be set to "pxe"
+provision_method: "idrac"
+
+# This is the timezone that will be set during provisioning of OS
+# Available timezones are provided in control_plane/common/files/timezone.txt
+# Default timezone will be "GMT"
+# Some of the other available timezones are EST,CET,MST,CST6CDT,PST8PDT
+timezone: "GMT"
+
+# This is the language that will be set during provisioning of the OS
+# Default language supported is "en-US"
+language: "en-US"
+
+# This is the path where the user has to place the iso image that needs to be provisioned in target nodes.
+# The iso file should be CentOS7-2009-minimal edition.
+# Other iso files are not supported.
+# Mandatory value required
+iso_file_path: "/root/CentOS-7-x86_64-Minimal-2009.iso"
+
+# Default lease time that will be used by dhcp
+# Its unit is seconds
+# Min: 21600 seconds
+# Default: 86400 seconds
+# Max: 31536000 seconds
+# Mandatory value required
+default_lease_time: "86400"
+
+### Usage: control_plane_device ###
+
+# The nic/ethernet card that needs to be connected to provision 
+# the fabric, idrac and powervault.
+# This nic will be configured by Omnia for the DHCP server.
+# Default value of nic is eno1
+mngmnt_network_nic: "eno1"
+
+# The dhcp range for assigning the IPv4 address
+# Example: 172.17.0.1
+# Mandatory value required
+mngmnt_network_dhcp_start_range: "172.19.0.101"
+mngmnt_network_dhcp_end_range: "172.19.0.200"
+
+# The mapping file consists of the MAC address and its corresponding IP address.
+# The format of the mapping file should be MAC,IP and it must be a CSV file.
+# Eg: xx:yy:zz:aa:bb,172.17.0.5
+# A template for the mapping file exists in omnia/examples and is named mapping_device_file.csv.
+# This is the path where the user has placed the mapping file used for DHCP configuration.
+
+### Usage: provision_cobbler ###
+
+# The nic/ethernet card that needs to be connected to provision the OS of bare metal servers
+# This nic will be configured by Omnia for the DHCP server.
+# Default value of nic is eno3
+host_network_nic: "eno3"
+
+# The dhcp range for assigning the IPv4 address
+# Example: 172.17.0.1
+# Mandatory value required
+host_network_dhcp_start_range: "172.17.0.101"
+host_network_dhcp_end_range: "172.17.0.200"
+
+# The mapping file consists of the MAC address and its respective Hostname and IP address,
+# optionally followed by a Component_role.
+# The format of the mapping file should be MAC,Hostname,IP and it must be a CSV file.
+# Eg: xx:yy:zz:aa:bb,server,172.17.0.5,Component_role(if any)
+# Templates for the mapping file exist in omnia/examples as host_mapping_file_os_provisioning.csv
+# and host_mapping_file_one_touch.csv.
+# This is the path where the user has placed the mapping file used for DHCP configuration.
+
+### Usage: control_plane_ib ###
+
+# The nic/ethernet card that needs to be connected to configure infiniband switch
+# This nic will be configured by Omnia for the DHCP server.
+# Default value of nic is ib0
+ib_network_nic: "ib0"
+
+# The dhcp range for assigning the IPv4 address
+# Example: 172.17.0.1
+ib_network_dhcp_start_range: "172.25.0.101"
+ib_network_dhcp_end_range: "172.25.0.200"

+ 81 - 0
control_plane/test/test_vars/login_vars.yml

@@ -0,0 +1,81 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+### Usage: provision_cobbler, provision_idrac ###
+
+# Password used while deploying OS on bare metal servers.
+# The Length of the password should be at least 8.
+# The password must not contain -,\, ',"
+# Mandatory value required
+provision_password: "test@123"
+
+### Usage: provision_cobbler ###
+
+# Password used for cobbler
+# The Length of the password should be at least 8.
+# The password must not contain -,\, ',"
+# Mandatory value required
+cobbler_password: "test@123"
+
+### Usage: provision_idrac ###
+
+# The username for idrac
+# The username must not contain -,\, ',"
+# Mandatory value required
+idrac_username: "root"
+
+# Password used for idrac
+# The password must not contain -,\, ',"
+# Mandatory value required
+idrac_password: "calvin"
+
+### Usage: webui_awx ###
+
+# Password used for awx UI
+# The Length of the password should be at least 8.
+# The password must not contain -,\, ',"
+#awx_password: ""
+
+### Usage: network_ethernet ###
+
+# The username for ethernet switch
+# The username must not contain -,\, ',"
+ethernet_switch_username: "admin"
+
+# Password used for ethernet switch
+# The password must not contain -,\, ',"
+ethernet_switch_password: "admin"
+
+### Usage: network_ib ###
+
+# The username for infiniband switch
+# The username must not contain -,\, ',"
+ib_username: "admin"
+
+# Password used for infiniband switch
+# The password must not contain -,\, ',"
+ib_password: "admin"
+
+### Usage: powervault_me4 ###
+
+# The username for powervault_me4
+# The username must not contain -,\, ',"
+powervault_me4_username: "manage"
+
+# Password used for powervault_me4
+# The password should have at least one uppercase character, one lowercase character,
+# one numeric character and one non-alphanumeric character.
+# The password must not contain -,\, ',", . , < , comma(,)
+powervault_me4_password: "Test@123"

+ 94 - 0
control_plane/test/test_vars/test_control_plane_vars.yml

@@ -0,0 +1,94 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+#usage: test_control_plane_validation.yml
+
+port_no: 22
+os_name: CentOS
+os_version: '8.4'
+internet_status: "Failed. No Internet connection. Make sure network is up."
+check_os_success_msg: "OS and Version are supported"
+check_os_fail_msg: "Unsupported OS or OS version. OS should be {{ os_name }} and version should be {{ os_version }} or later"
+
+input_params_folder: "../input_params/"
+control_plane_dir: "../"
+control_plane_validation_script_path: test_control_plane_validation.yml
+
+input_files_tc01:
+  - "test_vars/base_vars.yml"
+  - "test_vars/login_vars.yml"
+
+input_files_tc02:
+  - "test_vars/login_vars.yml"
+
+input_config_check_success_msg: "control_plane.yml validation passed"
+input_config_check_fail_msg: "control_plane.yml validation failed"
+
+install_package_success_msg: "{{item}} is installed"
+install_package_fail_msg: "{{item}} is not installed"
+login_vars_filename: "../input_params/login_vars.yml"
+login_vars_fail_msg: "Login vars is not encrypted"
+login_vars_success_msg: "Login vars is encrypted"
+
+fw_update_false: "firmware_update_required: false"
+fw_update_true: "firmware_update_required: true"
+fw_success_validation: "Validation Success for firmware update"
+fw_fail_validation: "Validation Failed for firmware update"
+docker_success_validation: "Docker Validated successfully"
+docker_fail_validation: "Docker not validated"
+
+test_package: 'jq'
+check_pods_success_msg: "Pod is running"
+check_pods_fail_msg: "Pod is not running"
+nfs_share_success_msg: "NFS Server is running"
+nfs_share_fail_msg: "NFS Server is not running"
+
+nfs_mount_Path: "/var/nfs_repo"
+nfs_mount_success_msg: "NFS repo is mounted"
+nfs_mount_fail_msg: "NFS repo is not mounted"
+check_iso_path: '/var/nfs_repo/unattended_centos7.iso'
+check_iso_success_msg: "ISO is present in the NFS repo"
+check_iso_fail_msg: "ISO is not present in the NFS repo"
+
+pod_service_check_fail_msg: "Service is not running"
+pod_service_check_success_msg: "Service is up and running"
+network_config_pod_success_msg: "Network-Config Pod is running"
+network_config_pod_fail_msg: "Network-Config Pod is not running"
+awx_pod_success_msg: "awx pod is up and running."
+awx_pod_fail_msg: "awx pod is not running"
+pvc_stat_success_msg: "pvc stat is running"
+pvc_stat_fail_msg: "pvc stat is not running"
+svc_stat_success_msg: "svc stat is running"
+svc_stat_fail_msg: "svc stat is not running"
+svc_conn_success_msg: "Connection to svc is successful at"
+svc_conn_fail_msg: "Connection to svc failed at: "
+cobbler_pod_success_msg: "Cobbler service is running"
+cobbler_pod_fail_msg: "Cobbler service is not running"
+subnet_manager_success_msg: "Subnet Manager is running"
+subnet_manager_fail_msg: "Subnet Manager is not running"
+cron_jobs_success_msg: "Cron jobs are running"
+cron_jobs_fail_msg: "Cron jobs are not running"
+container_info: "CONTAINER_RUNNING"
+ethernet_true: "ethernet_switch_support: true"
+ethernet_false: "ethernet_switch_support: false"
+powervault_true: "powervault_support: true"
+powervault_false: "powervault_support: false"
+infiniband_true: "ib_switch_support: true"
+infiniband_false: "ib_switch_support: false"
+# Update with valid Docker credentials before running the tests
+docker_user: "User"
+docker_password: "Password"
+valid_docker_creds: "Credentials are valid"
+invalid_docker_creds: "Please input valid docker username and password in test_control_plane_vars.yml"

+ 56 - 0
control_plane/test/test_vars/test_ethernet_vars.yml

@@ -0,0 +1,56 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+---
+# Usage : test_ethernet_fact.yml
+failed_msg: "Unexpected scenario"
+success_message: "Execution successful"
+eth_inventory_name: "ethernet_inventory"
+eth_host_name: "100.96.23.241"
+fact_template_value: "ethernet_template"
+fact_job_name: "ethernet_template"
+eth_facts_playbook_path: "control_plane/tools/ethernet_facts.yml"
+awx_script_path: "test_prepare.yml"
+random_ip: 100.100.100.100
+invalid_username: "invalid_username"
+username: admin
+password: admin
+
+# Usage : test_ethernet_config.yml
+ethernet_dir: "ethernet.yml"
+ethernet_config_dir: "../input_params/ethernet_vars.yml"
+ethernet_config_backup_dir: "ethernet_config_backup.yml"
+get_mtu_dir: "test_eth_mtu.yml"
+appliance_dir: "/root/ethernet/control_plane"
+fail_case: "Expected error, please check the configurations"
+sed_condition: '/Port 4/a mtu2345'
+eth_template_value: "ethernet_template"
+eth_job_name: "ethernet_template"
+eth_playbook_path: "control_plane/ethernet.yml"
+inventory_dir: "test_ethernet_inventory"
+login_vars_path: "../input_params/login_vars.yml"
+login_vars_vault_path: "../input_params/.login_vault_key"
+tower_config_file_path: "../roles/webui_awx/files/.tower_cli.cfg"
+tower_vault_file_path: "../roles/webui_awx/files/.tower_vault_key"
+file_perm: '0644'
+
+# Usage : test_eth_mtu.yml, test_ethernet_config.yml
+validation_port: 1/1/4:1
+port_num: 4
+search_line: "    ethernet 1/1/5:"
+add_mtu_line: "      mtu: 2345"
+time_to_pause: 4
+validate_mtu_line: "MTU 2345 bytes"
+ethernet_success_msg: "successful"
+ethernet_fail_msg: "failed"

+ 75 - 0
control_plane/test/test_vars/test_idrac_vars.yml

@@ -0,0 +1,75 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Usage: test_prepare.yml
+idrac_inventory_name: "test_idrac_inventory"
+template_value: 'idrac_template'
+job_name: "test_idrac_template"
+idrac_playbook_path: "control_plane/idrac.yml"
+tower_config_file_path: "../roles/webui_awx/files/.tower_cli.cfg"
+tower_vault_file_path: "../roles/webui_awx/files/.tower_vault_key"
+file_perm: '0644'
+
+# Usage: test_idrac.yml
+validation_script_path: "test_idrac_validation.yml"
+awx_script_path: "test_prepare.yml"
+temp_scp_path: "temp_scp.xml"
+idrac_var_path: "../input_params/idrac_vars.yml"
+login_vars_path: "../input_params/login_vars.yml"
+login_vars_vault_path: "../input_params/.login_vault_key"
+vd_name: "omnia_vd"
+bootmode_attr_name: 'Name="BootMode"'
+sysprofile_attr_name: 'Name="SysProfile"'
+snmp_attr_name: 'Name="SNMPAlert.1#State"'
+sysprofile: "idrac_system_profile:"
+sysprofile_params1: 'idrac_system_profile: "Performance"'
+sysprofile_params2: 'idrac_system_profile: "PerformancePerWatt(DAPC)"'
+sysprofile_params3: 'idrac_system_profile: "WorkstationPerformance"'
+sysprofile_params4: 'idrac_system_profile: "PerformancePerWatt(OS)"'
+sysprofile_value1: "Performance"
+sysprofile_value2: "PerformancePerWatt(DAPC)"
+sysprofile_value3: "WorkstationPerformance"
+sysprofile_value4: "PerformancePerWatt(OS)"
+
+# Usage: test_idrac_validation.yml
+provisioned_inventory_name: "provisioned_idrac_inventory"
+awx_namespace: "awx"
+awx_pod_regex: 'awx-([A-Za-z0-9]{10})-([A-Za-z0-9]{5})'
+awx_pod_item_regex: "awx-([A-Za-z0-9]{10})-([A-Za-z0-9]{5})"
+bootmode_regex1: '<Attribute Name="BootMode">Uefi</Attribute>'
+bootmode_regex2: '<Attribute Name="BootMode">Bios</Attribute>'
+snmp_regex1: '<Attribute Name="SNMPAlert.1#State">Enabled</Attribute>'
+snmp_regex2: '<Attribute Name="SNMPAlert.1#State">Disabled</Attribute>'
+sysprofile_regex1: '<Attribute Name="SysProfile">PerfOptimized</Attribute>'
+sysprofile_regex2: '<Attribute Name="SysProfile">PerfPerWattOptimizedDapc</Attribute>'
+sysprofile_regex3: '<Attribute Name="SysProfile">PerfWorkStationOptimized</Attribute>'
+sysprofile_regex4: '<Attribute Name="SysProfile">PerfPerWattOptimizedOs</Attribute>'
+pxedevice: '<Attribute Name="PxeDev1EnDis">Enabled</Attribute>'
+firmware_status: "The catalog in the repository specified in the operation has the same firmware versions as currently present on the server."
+bootmode_check_fail_msg: "Boot mode configuration failed"
+bootmode_check_success_msg: "Boot mode configuration is successful"
+snmp_check_fail_msg: "SNMP configuration failed"
+snmp_check_success_msg: "SNMP configuration is successful"
+sysprofile_check_fail_msg: "System profile configuration failed"
+sysprofile_check_success_msg: "System profile configuration is successful"
+pxedevice_check_fail_msg: "Pxe device configuration failed"
+pxedevice_check_success_msg: "Pxe device configuration is successful"
+vd_success_msg: "Virtual disk created successfully"
+vd_fail_msg: "Virtual disk is not present"
+provisioned_ip_success_msg: "IP added to provisioned_idrac_inventory successfully"
+provisioned_ip_fail_msg: "IP is not added to provisioned_idrac_inventory"
+failed_msg: "Failed. Please check input parameters and try again!"
+firmware_fail_msg: "Firmware update failed"
+firmware_success_msg: "Firmware update was successful"

+ 88 - 0
control_plane/test/test_vars/test_powervault_vars.yml

@@ -0,0 +1,88 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# vars file for test_powervault.yml file
+connectivity_success_msg: "Connection was successful"
+connectivity_failure_msg: "Connectivity failed"
+product_id_success_msg: "Product id matches the supported powervault versions"
+product_id_failure_msg: "Product id doesn't match the supported powervault versions"
+ip_validation_success_msg: "IP validation was successful"
+ip_validation_failure_msg: "IP validation failed"
+user_validation_success_msg: "Users validation was successful"
+user_validation_failure_msg: "Users validation failed"
+pool_diskgroup_config_success_msg: "Pool and disk group addition was successful"
+pool_diskgroup_config_failure_msg: "Pool and disk group addition failed"
+volumes_creation_success_msg: "Volumes creation was successful"
+volumes_creation_failure_msg: "Volumes creation failed"
+snmp_trap_success_msg: "SNMP trap notifications are disabled"
+snmp_trap_enable_success_msg: "SNMP trap notifications are enabled"
+snmp_trap_failure_msg: "Error in setting up the snmp parameters"
+scsi_product_id: "ME4"
+user_roles: "manage"
+user_type: "Standard"
+user_locale: "English"
+pool_a: "A"
+pool_b: "B"
+pool_type: "Virtual"
+raid1_level: "RAID1"
+raid5_level: "RAID5"
+raid6_level: "RAID6"
+raid10_level: "RAID10"
+raid_adapt_level: "ADAPT"
+disk_type_ssdsas: "SSD SAS"
+disk_type_sasmdl: "SAS MDL"
+disk_group_name_omnia_dgA01: "omnia_dgA01"
+disk_group_name_omnia_dgA02: "omnia_dgA02"
+disk_group_name_omnia_dgB01: "omnia_dgB01"
+volume_name_k8s_v1: "k8s_volume"
+volume_name_slurm_v1: "slurm_volume"
+volume_name_k8s_v2: "k8s_V2"
+volume_name_slurm_v2: "slurm_V2"
+volume_size_input_100gb: "100GB"
+volume_size_100gb: "99.9GB"
+disk_range_5_6: "0.5-6"
+disk_range_3_4: "0.3-4"
+disk_range_7_10: "0.7-10"
+disk_range_8_11: "0.8-9:0.10-11"
+disk_range_0_2: "0.0-2"
+disk_range_2_3: "0.2-3"
+disk_range_0_11: "0.0-11"
+dhcp_start_range: "192.168.0.0"
+dhcp_end_range: "192.168.0.100"
+ip_range: "192.168"
+snmp_notify_level_crit: "crit"
+snmp_notify_level_none: "none"
+snmp_destination: "100.96.22.199"
+snmp_notify_status_enable: "Enabled"
+snmp_notify_status_disable: "Disabled"
+
+login_vars_path: "../input_params/login_vars.yml"
+login_vars_vault_path: "../input_params/.login_vault_key"
+powervault_me4_var_path: "../input_params/powervault_me4_vars.yml"
+base_var_path: "../input_params/base_vars.yml"
+powervault_inventory_name: "powervault_me4_inventory"
+template_value: "powervault_me4_template"
+job_name: "test_powervault_me4_template"
+powervault_playbook_path: "control_plane/powervault_me4.yml"
+awx_script_path: "test_prepare.yml"
+failed_job_status: "failed"
+success_job_status: "successful"
+playbook_exec_success_msg: "Powervault playbook execution completed successfully"
+playbook_exec_fail_msg: "Job execution failed. Please check input parameters and try again!"
+failed_job_status_success_msg: "Execution failed. Please check AWX console for appropriate error message"
+failed_msg: "Failed. Please check input parameters and try again!"
+tower_config_file_path: "../roles/webui_awx/files/.tower_cli.cfg"
+tower_vault_file_path: "../roles/webui_awx/files/.tower_vault_key"
+file_perm: '0644'

+ 5 - 1
control_plane/tools/roles/configure_new_devices/tasks/main.yml

@@ -93,6 +93,10 @@
       changed_when: true
       no_log: true
 
+    - name: Wait for 10 minutes for DHCP to assign IPs to devices
+      wait_for:
+        timeout: "{{ dhcp_wait_time }}"
+
     - name: Launch idrac_template
       command: >-
         awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
@@ -101,4 +105,4 @@
       changed_when: false
       no_log: true
       when: inventory_hostname not in fetch_inventory.stdout
-  when: provisioned_idrac_inventory_name in inventory_list.stdout
+  when: provisioned_idrac_inventory_name in inventory_list.stdout

+ 2 - 1
control_plane/tools/roles/configure_new_devices/vars/main.yml

@@ -17,4 +17,5 @@
 
 # Usage: main.yml
 ethernet_template_name: "ethernet_template"
-idrac_template_name: "idrac_template"
+idrac_template_name: "idrac_template"
+dhcp_wait_time: 600

+ 2 - 2
control_plane/tools/roles/idrac_2fa/tasks/configure_smtp.yml

@@ -55,7 +55,7 @@
   register: test_email_check
   retries: 5
   no_log: true
-  ignore_errors: true
+  failed_when: false
   until: not test_email_check.failed
 
 - name: Check test email status
@@ -74,4 +74,4 @@
       EmailAlert.1.Enable: "Disabled"
     baseuri: "{{ inventory_hostname }}"
     username: "{{ idrac_username }}"
-    password: "{{ idrac_password }}"
+    password: "{{ idrac_password }}"

+ 11 - 2
docs/FAQ.md

@@ -112,7 +112,7 @@ Resolution:
 It is recommended that the ansible-vault view or edit commands are used and not the ansible-vault decrypt or encrypt commands.
 
 ## What to do if the LC is not ready?
-* Ensure the LC is in a ready state for all the servers.
+* Verify the state of the LC on all servers by running `racadm getremoteservicesstatus`, for example as shown below.
 * Launch iDRAC template.
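+
+A minimal remote check from the control plane, assuming remote racadm is installed there (IP and credentials below are placeholders):
+```
+racadm -r <idrac_ip> -u <idrac_username> -p <idrac_password> getremoteservicesstatus
+```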
 
 ## What to do if the network CIDR entry of iDRAC IP in /etc/exports file is missing?
@@ -127,6 +127,15 @@ It is recommended that the ansible-vault view or edit commands are used and not
 ## Is Disabling 2FA supported by Omnia?
 * Disabling 2FA is not supported by Omnia and must be manually disabled.
 
+## Is provisioning servers using the BOSS controller supported by Omnia?
+* Provisioning servers using the BOSS controller is not supported by Omnia. It will be supported in upcoming releases.
+
 ## The provisioning of PowerEdge servers failed. How do I clean up before starting over?
 1. Delete the respective iDRAC IP addresses from the *provisioned_idrac_inventory* on the AWX UI or delete the *provisioned_idrac_inventory* to delete the iDRAC IP addresses of all the servers in the cluster.
-2. Launch the iDRAC template from the AWX UI.
+2. Launch the iDRAC template from the AWX UI (or via the AWX CLI, as sketched below).
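+
+A rough CLI sketch of both steps, assuming the awx CLI is installed on the control plane and `idrac_template` is the template name used in your deployment (host, password, and host ID below are placeholders):
+```
+# List hosts in provisioned_idrac_inventory and note their IDs
+awx --conf.host <awx_host> --conf.username admin --conf.password <awx_password> \
+    hosts list --inventory provisioned_idrac_inventory -f human
+# Delete a host by ID, then re-launch the iDRAC template and wait for it to finish
+awx --conf.host <awx_host> --conf.username admin --conf.password <awx_password> hosts delete <host_id>
+awx --conf.host <awx_host> --conf.username admin --conf.password <awx_password> \
+    job_templates launch idrac_template --monitor
+```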
+
+## What to do when a WARNING about older firmware is displayed during idrac_template execution and the task fails?
+Potential Cause: Older firmware versions on the PowerEdge servers. Omnia supports only iDRAC 8 based Dell EMC PowerEdge servers with firmware versions 2.75.75.75 and above, and iDRAC 9 based Dell EMC PowerEdge servers with firmware versions 4.40.40.00 and above.
+
+1. Manually update the iDRAC firmware on the PowerEdge servers to a supported version (you can confirm the installed version first, as shown below).
+2. Re-run idrac_template.
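+
+To confirm the currently installed firmware version before re-running, assuming remote racadm is available (IP and credentials are placeholders):
+```
+racadm -r <idrac_ip> -u <idrac_username> -p <idrac_password> getversion
+```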

+ 3 - 0
examples/host_mapping_file_one_touch.csv

@@ -0,0 +1,3 @@
+MAC,Hostname,IP,Component_role
+xx:yy:zz:aa:bb,server,1.2.3.4,manager
+aa:bb:cc:dd:ee,server2,10.10.11.12,nfs_node

+ 3 - 0
examples/host_mapping_file_os_provisioning.csv

@@ -0,0 +1,3 @@
+MAC,Hostname,IP
+xx:yy:zz:aa:bb,server,1.2.3.4
+aa:bb:cc:dd:ee,server2,10.10.11.12

+ 2 - 0
examples/mapping_device_file.csv

@@ -0,0 +1,2 @@
+MAC,IP
+xx:yy:zz:aa:bb,1.2.3.4

+ 0 - 2
examples/mapping_file.csv

@@ -1,2 +0,0 @@
-MAC,Hostname,IP
-xx:yy:zz:aa:bb,server,1.2.3.4

+ 1 - 1
omnia.yml

@@ -105,7 +105,7 @@
   tags: kubernetes
 
 - name: Start K8s worker servers on manager nodes
-  hosts: manager
+  hosts: manager, compute
   gather_facts: false
   roles:
     - k8s_start_services

+ 11 - 30
platforms/roles/jupyterhub/tasks/main.yml

@@ -30,33 +30,14 @@
    mode: "{{ jupyter_config_file_mode }}"
 
 - name: JupyterHub deploy
-  block:
-    - name: JupyterHub deploy
-      command: >
-        helm upgrade --cleanup-on-fail \
-        --install {{ jupyterhub_namespace }} jupyterhub/jupyterhub \
-        --namespace {{ jupyterhub_namespace }} \
-        --create-namespace \
-        --version {{ helm_chart_version }} \
-        --values {{ jupyter_config_file_dest }} \
-        --timeout {{ timeout_min_sec }}
-      register: deployment_output
-
-  rescue:
-    - name: JupyterHub deployment error
-      debug:
-        msg: "Previous JupyterHub deployment is in progress"
-      when: "'another operation (install/upgrade/rollback) is in progress' in deployment_output.stderr"
-
-    - name: Delete existing release
-      command: helm delete '{{ jupyterhub_namespace }}'
-
-    - name: JupyterHub deploy
-      command: >
-        helm upgrade --cleanup-on-fail \
-        --install {{ jupyterhub_namespace }} jupyterhub/jupyterhub \
-        --namespace {{ jupyterhub_namespace }} \
-        --create-namespace \
-        --version {{ helm_chart_version }} \
-        --values {{ jupyter_config_file_dest }} \
-        --timeout {{ timeout_min_sec }}
+  command: >
+    helm upgrade --cleanup-on-fail \
+    --install {{ jupyterhub_namespace }} jupyterhub/jupyterhub \
+    --namespace {{ jupyterhub_namespace }} \
+    --create-namespace \
+    --version {{ helm_chart_version }} \
+    --values {{ jupyter_config_file_dest }} \
+    --timeout {{ timeout_min_sec }}
+  changed_when: true
+  failed_when: false
+  register: deployment_output

+ 91 - 0
roles/cluster_validation/tasks/fetch_powervault_status.yml

@@ -0,0 +1,91 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Check tower_cli.cfg is encrypted
+  command: cat "{{ tower_config_path }}"
+  changed_when: false
+  register: awx_content
+  run_once: true
+  no_log: true
+
+- name: Decrypt tower_cli.cfg
+  command: ansible-vault decrypt "{{ tower_config_path }}" --vault-password-file "{{ tower_vault_path }}"
+  changed_when: false
+  run_once: true
+  when: "'$ANSIBLE_VAULT;' in awx_content.stdout"
+
+- name: Fetch awx host
+  command: grep "host:" "{{ tower_config_path }}"
+  register: fetch_awx_host
+  changed_when: false
+  run_once: true
+
+- name: Fetch awx username
+  command: grep "username:" "{{ tower_config_path }}"
+  register: fetch_awx_username
+  changed_when: false
+  run_once: true
+  no_log: true
+
+- name: Fetch awx password
+  command: grep "password:" "{{ tower_config_path }}"
+  register: fetch_awx_password
+  changed_when: false
+  run_once: true
+  no_log: true
+
+- name: Set awx variables
+  set_fact:
+    awx_host: "{{ fetch_awx_host.stdout | regex_replace('host: ','') }}"
+    awx_username: "{{ fetch_awx_username.stdout | regex_replace('username: ','') }}"
+    awx_password: "{{ fetch_awx_password.stdout | regex_replace('password: ','') }}"
+  no_log: true
+
+- name: Encrypt tower_cli.cfg
+  command: ansible-vault encrypt "{{ tower_config_path }}" --vault-password-file "{{ tower_vault_path }}"
+  changed_when: false
+  run_once: true
+  when: "'$ANSIBLE_VAULT;' in awx_content.stdout"
+
+- name: Get inventory list
+  command: >-
+    awx --conf.host "{{ awx_host }}" --conf.username "{{ awx_username }}" --conf.password "{{ awx_password }}"
+    inventory list -f human --filter "name"
+  register: inventory_list
+  run_once: true
+  changed_when: false
+  no_log: true
+
+- block:
+    - name: Fetch powervault_me4_inventory
+      command: >-
+        awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+        hosts list --inventory "{{ powervault_inventory_name }}" -f human --filter "name"
+      register: fetch_inventory
+      run_once: true
+      changed_when: false
+      no_log: true
+
+    - name: Set powervault_status
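+    # The first two lines of the human-format output are table headers, so hosts
+    # exist in the inventory only when there are rows beyond index 2.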
+      set_fact:
+        powervault_status: true
+      when: fetch_inventory.stdout_lines[2:] | length > 0
+
+    - name: Create powervault_me4 group
+      add_host:
+        name: "{{ item | regex_replace(' ','') }}"
+        groups: "{{ powervault_group }}"
+      when: powervault_status
+      with_items: "{{ fetch_inventory.stdout_lines[2:] }}"

+ 63 - 8
roles/cluster_validation/tasks/main.yml

@@ -1,4 +1,4 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 ---
+
 - name: Perform validations
   include_tasks: validations.yml
 
@@ -20,18 +21,72 @@
 
 - name: Check if omnia is running from AWX
   block:
-    - name: Appliance status
+    - name: Initialize variables
       set_fact:
-        appliance_status: false
+        control_plane_status: false
+        powervault_status: false
+        nfs_node_status: false
 
     - name: Check AWX instance
-      command: awx-manage --version
+      command: awx --version
+      changed_when: false
+      failed_when: false
+      register: awx_version_check
+
+    - name: Check AWX hostname
+      command: hostname
+      changed_when: false
+      register: awx_hostname
 
-    - name: Update appliance status
+    - name: Set control_plane_status
       set_fact:
-        appliance_status: true
+        control_plane_status: true
+      when:
+        - not awx_version_check.failed
+        - '"awx-" in awx_hostname.stdout'
+
+    - name: Set NFS node status
+      set_fact:
+        nfs_node_status: true
+      when:
+        - control_plane_status
+        - groups['nfs_node'] | length == 1
+
+    - name: Fetch powervault status
+      include_tasks: fetch_powervault_status.yml
+      when: nfs_node_status
 
-  rescue:
+- name: omnia.yml running on host
+  block:
     - name: Passwordless SSH status
       debug:
-        msg: "omnia.yml running on host"
+        msg: "omnia.yml running on host"
+
+    - name: Check whether ansible config file exists
+      stat:
+        path: "{{ ansible_conf_file_path }}/ansible.cfg"
+      register: ansible_conf_exists
+
+    - name: Create the directory if it does not exist
+      file:
+        path: "{{ ansible_conf_file_path }}"
+        state: directory
+        mode: "{{ file_perm }}"
+      when: not ansible_conf_exists.stat.exists
+
+    - name: Create ansible config file if it does not exist
+      copy:
+        dest: "{{ ansible_conf_file_path }}/ansible.cfg"
+        mode: "{{ file_perm }}"
+        content: |
+          [defaults]
+          log_path = /var/log/omnia.log
+      when: not ansible_conf_exists.stat.exists
+
+    - name: Set omnia.log file
+      replace:
+        path: "{{ ansible_conf_file_path }}/ansible.cfg"
+        regexp: '#log_path = /var/log/ansible.log'
+        replace: 'log_path = /var/log/omnia.log'
+      when: ansible_conf_exists.stat.exists
+  when: not control_plane_status

+ 10 - 2
roles/cluster_validation/tasks/validations.yml

@@ -1,4 +1,4 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 ---
+
 - name: Validate skip tags
   fail:
     msg: "{{ skip_tag_fail_msg }}"
@@ -27,4 +28,11 @@
   assert:
     that: "groups['compute'] | length | int >= 1"
     fail_msg: "{{ compute_group_fail_msg }}"
-    success_msg: "{{ compute_group_success_msg }}"
+    success_msg: "{{ compute_group_success_msg }}"
+
+- name: NFS group to contain exactly 1 node
+  assert:
+    that: "groups['nfs_node'] | length | int == 1"
+    fail_msg: "{{ nfs_node_group_fail_msg }}"
+    success_msg: "{{ nfs_node_group_success_msg }}"
+  when: groups['nfs_node']

+ 13 - 4
roles/cluster_validation/vars/main.yml

@@ -1,4 +1,4 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -12,7 +12,8 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 ---
-#Usage: fetch_password.yml
+
+# Usage: fetch_password.yml
 config_filename: "omnia_config.yml"
 config_vaultname: .omnia_vault_key
 min_length: 8
@@ -39,7 +40,7 @@ input_config_failure_msg: "Input parameters cannot be empty"
 login_node_required_success_msg: "Login_node_required successfully validated"
 login_node_required_fail_msg: "Failed. login_node_required can be either true or false"
 
-#Usage: validations.yml
+# Usage: validations.yml
 skip_tag_fail_msg: "Can't skip both slurm and kubernetes"
 manager_group_fail_msg: "manager group should contain exactly 1 node"
 manager_group_success_msg: "manager group check passed"
@@ -48,4 +49,12 @@ compute_group_success_msg: "compute group check passed"
 disjoint_fail_msg: "manager and compute groups should be disjoint"
 disjoint_success_msg: "manager and compute groups are disjoint"
 login_node_group_fail_msg: "Login node group should contain atleast 1 node when login_node_required is true"
-login_node_group_success_msg: "Login node group check passed when login_node_required is true"
+login_node_group_success_msg: "Login node group check passed when login_node_required is true"
+nfs_node_group_fail_msg: "nfs_node group should contain exactly 1 node"
+nfs_node_group_success_msg: "nfs_node group check passed"
+
+# Usage: fetch_powervault_status.yml
+tower_config_path: "{{ playbook_dir }}/control_plane/roles/webui_awx/files/.tower_cli.cfg"
+tower_vault_path: "{{ playbook_dir }}/control_plane/roles/webui_awx/files/.tower_vault_key"
+powervault_inventory_name: "powervault_me4_inventory"
+powervault_group: "powervault_me4"

File diffs are limited because there are too many changes
+ 4090 - 0
roles/k8s_start_manager/files/kube-calico.yaml


+ 63 - 376
roles/k8s_start_manager/files/kube-flannel.yaml

@@ -1,5 +1,5 @@
 ---
-apiVersion: extensions/v1beta1
+apiVersion: policy/v1beta1
 kind: PodSecurityPolicy
 metadata:
   name: psp.flannel.unprivileged
@@ -11,14 +11,14 @@ metadata:
 spec:
   privileged: false
   volumes:
-    - configMap
-    - secret
-    - emptyDir
-    - hostPath
+  - configMap
+  - secret
+  - emptyDir
+  - hostPath
   allowedHostPaths:
-    - pathPrefix: "/etc/cni/net.d"
-    - pathPrefix: "/etc/kube-flannel"
-    - pathPrefix: "/run/flannel"
+  - pathPrefix: "/etc/cni/net.d"
+  - pathPrefix: "/etc/kube-flannel"
+  - pathPrefix: "/run/flannel"
   readOnlyRootFilesystem: false
   # Users and groups
   runAsUser:
@@ -31,7 +31,7 @@ spec:
   allowPrivilegeEscalation: false
   defaultAllowPrivilegeEscalation: false
   # Capabilities
-  allowedCapabilities: ['NET_ADMIN']
+  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
   defaultAddCapabilities: []
   requiredDropCapabilities: []
   # Host namespaces
@@ -43,40 +43,40 @@ spec:
     max: 65535
   # SELinux
   seLinux:
-    # SELinux is unsed in CaaSP
+    # SELinux is unused in CaaSP
     rule: 'RunAsAny'
 ---
 kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: flannel
 rules:
-  - apiGroups: ['extensions']
-    resources: ['podsecuritypolicies']
-    verbs: ['use']
-    resourceNames: ['psp.flannel.unprivileged']
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-    verbs:
-      - get
-  - apiGroups:
-      - ""
-    resources:
-      - nodes
-    verbs:
-      - list
-      - watch
-  - apiGroups:
-      - ""
-    resources:
-      - nodes/status
-    verbs:
-      - patch
+- apiGroups: ['extensions']
+  resources: ['podsecuritypolicies']
+  verbs: ['use']
+  resourceNames: ['psp.flannel.unprivileged']
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes/status
+  verbs:
+  - patch
 ---
 kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: flannel
 roleRef:
@@ -106,6 +106,7 @@ data:
   cni-conf.json: |
     {
       "name": "cbr0",
+      "cniVersion": "0.3.1",
       "plugins": [
         {
           "type": "flannel",
@@ -130,31 +131,42 @@ data:
       }
     }
 ---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  name: kube-flannel-ds-amd64
+  name: kube-flannel-ds
   namespace: kube-system
   labels:
     tier: node
     app: flannel
 spec:
+  selector:
+    matchLabels:
+      app: flannel
   template:
     metadata:
       labels:
         tier: node
         app: flannel
     spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: kubernetes.io/os
+                operator: In
+                values:
+                - linux
       hostNetwork: true
-      nodeSelector:
-        beta.kubernetes.io/arch: amd64
+      priorityClassName: system-node-critical
       tolerations:
       - operator: Exists
         effect: NoSchedule
       serviceAccountName: flannel
       initContainers:
       - name: install-cni
-        image: quay.io/coreos/flannel:v0.11.0-amd64
+        image: quay.io/coreos/flannel:v0.14.0
         command:
         - cp
         args:
@@ -168,13 +180,12 @@ spec:
           mountPath: /etc/kube-flannel/
       containers:
       - name: kube-flannel
-        image: quay.io/coreos/flannel:v0.11.0-amd64
+        image: quay.io/coreos/flannel:v0.14.0
         command:
         - /opt/bin/flanneld
         args:
         - --ip-masq
         - --kube-subnet-mgr
-        - --iface=ib0
         resources:
           requests:
             cpu: "100m"
@@ -185,7 +196,7 @@ spec:
         securityContext:
           privileged: false
           capabilities:
-             add: ["NET_ADMIN"]
+            add: ["NET_ADMIN", "NET_RAW"]
         env:
         - name: POD_NAME
           valueFrom:
@@ -201,336 +212,12 @@ spec:
         - name: flannel-cfg
           mountPath: /etc/kube-flannel/
       volumes:
-        - name: run
-          hostPath:
-            path: /run/flannel
-        - name: cni
-          hostPath:
-            path: /etc/cni/net.d
-        - name: flannel-cfg
-          configMap:
-            name: kube-flannel-cfg
----
-apiVersion: extensions/v1beta1
-kind: DaemonSet
-metadata:
-  name: kube-flannel-ds-arm64
-  namespace: kube-system
-  labels:
-    tier: node
-    app: flannel
-spec:
-  template:
-    metadata:
-      labels:
-        tier: node
-        app: flannel
-    spec:
-      hostNetwork: true
-      nodeSelector:
-        beta.kubernetes.io/arch: arm64
-      tolerations:
-      - operator: Exists
-        effect: NoSchedule
-      serviceAccountName: flannel
-      initContainers:
-      - name: install-cni
-        image: quay.io/coreos/flannel:v0.11.0-arm64
-        command:
-        - cp
-        args:
-        - -f
-        - /etc/kube-flannel/cni-conf.json
-        - /etc/cni/net.d/10-flannel.conflist
-        volumeMounts:
-        - name: cni
-          mountPath: /etc/cni/net.d
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      containers:
-      - name: kube-flannel
-        image: quay.io/coreos/flannel:v0.11.0-arm64
-        command:
-        - /opt/bin/flanneld
-        args:
-        - --ip-masq
-        - --kube-subnet-mgr
-        - --iface=ib0
-        resources:
-          requests:
-            cpu: "100m"
-            memory: "50Mi"
-          limits:
-            cpu: "100m"
-            memory: "50Mi"
-        securityContext:
-          privileged: false
-          capabilities:
-             add: ["NET_ADMIN"]
-        env:
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: POD_NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        volumeMounts:
-        - name: run
-          mountPath: /run/flannel
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      volumes:
-        - name: run
-          hostPath:
-            path: /run/flannel
-        - name: cni
-          hostPath:
-            path: /etc/cni/net.d
-        - name: flannel-cfg
-          configMap:
-            name: kube-flannel-cfg
----
-apiVersion: extensions/v1beta1
-kind: DaemonSet
-metadata:
-  name: kube-flannel-ds-arm
-  namespace: kube-system
-  labels:
-    tier: node
-    app: flannel
-spec:
-  template:
-    metadata:
-      labels:
-        tier: node
-        app: flannel
-    spec:
-      hostNetwork: true
-      nodeSelector:
-        beta.kubernetes.io/arch: arm
-      tolerations:
-      - operator: Exists
-        effect: NoSchedule
-      serviceAccountName: flannel
-      initContainers:
-      - name: install-cni
-        image: quay.io/coreos/flannel:v0.11.0-arm
-        command:
-        - cp
-        args:
-        - -f
-        - /etc/kube-flannel/cni-conf.json
-        - /etc/cni/net.d/10-flannel.conflist
-        volumeMounts:
-        - name: cni
-          mountPath: /etc/cni/net.d
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      containers:
-      - name: kube-flannel
-        image: quay.io/coreos/flannel:v0.11.0-arm
-        command:
-        - /opt/bin/flanneld
-        args:
-        - --ip-masq
-        - --kube-subnet-mgr
-        - --iface=ib0
-        resources:
-          requests:
-            cpu: "100m"
-            memory: "50Mi"
-          limits:
-            cpu: "100m"
-            memory: "50Mi"
-        securityContext:
-          privileged: false
-          capabilities:
-             add: ["NET_ADMIN"]
-        env:
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: POD_NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        volumeMounts:
-        - name: run
-          mountPath: /run/flannel
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      volumes:
-        - name: run
-          hostPath:
-            path: /run/flannel
-        - name: cni
-          hostPath:
-            path: /etc/cni/net.d
-        - name: flannel-cfg
-          configMap:
-            name: kube-flannel-cfg
----
-apiVersion: extensions/v1beta1
-kind: DaemonSet
-metadata:
-  name: kube-flannel-ds-ppc64le
-  namespace: kube-system
-  labels:
-    tier: node
-    app: flannel
-spec:
-  template:
-    metadata:
-      labels:
-        tier: node
-        app: flannel
-    spec:
-      hostNetwork: true
-      nodeSelector:
-        beta.kubernetes.io/arch: ppc64le
-      tolerations:
-      - operator: Exists
-        effect: NoSchedule
-      serviceAccountName: flannel
-      initContainers:
-      - name: install-cni
-        image: quay.io/coreos/flannel:v0.11.0-ppc64le
-        command:
-        - cp
-        args:
-        - -f
-        - /etc/kube-flannel/cni-conf.json
-        - /etc/cni/net.d/10-flannel.conflist
-        volumeMounts:
-        - name: cni
-          mountPath: /etc/cni/net.d
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      containers:
-      - name: kube-flannel
-        image: quay.io/coreos/flannel:v0.11.0-ppc64le
-        command:
-        - /opt/bin/flanneld
-        args:
-        - --ip-masq
-        - --kube-subnet-mgr
-        - --iface=ib0
-        resources:
-          requests:
-            cpu: "100m"
-            memory: "50Mi"
-          limits:
-            cpu: "100m"
-            memory: "50Mi"
-        securityContext:
-          privileged: false
-          capabilities:
-             add: ["NET_ADMIN"]
-        env:
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: POD_NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        volumeMounts:
-        - name: run
-          mountPath: /run/flannel
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      volumes:
-        - name: run
-          hostPath:
-            path: /run/flannel
-        - name: cni
-          hostPath:
-            path: /etc/cni/net.d
-        - name: flannel-cfg
-          configMap:
-            name: kube-flannel-cfg
----
-apiVersion: extensions/v1beta1
-kind: DaemonSet
-metadata:
-  name: kube-flannel-ds-s390x
-  namespace: kube-system
-  labels:
-    tier: node
-    app: flannel
-spec:
-  template:
-    metadata:
-      labels:
-        tier: node
-        app: flannel
-    spec:
-      hostNetwork: true
-      nodeSelector:
-        beta.kubernetes.io/arch: s390x
-      tolerations:
-      - operator: Exists
-        effect: NoSchedule
-      serviceAccountName: flannel
-      initContainers:
-      - name: install-cni
-        image: quay.io/coreos/flannel:v0.11.0-s390x
-        command:
-        - cp
-        args:
-        - -f
-        - /etc/kube-flannel/cni-conf.json
-        - /etc/cni/net.d/10-flannel.conflist
-        volumeMounts:
-        - name: cni
-          mountPath: /etc/cni/net.d
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      containers:
-      - name: kube-flannel
-        image: quay.io/coreos/flannel:v0.11.0-s390x
-        command:
-        - /opt/bin/flanneld
-        args:
-        - --ip-masq
-        - --kube-subnet-mgr
-        - --iface=ib0
-        resources:
-          requests:
-            cpu: "100m"
-            memory: "50Mi"
-          limits:
-            cpu: "100m"
-            memory: "50Mi"
-        securityContext:
-          privileged: false
-          capabilities:
-             add: ["NET_ADMIN"]
-        env:
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: POD_NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        volumeMounts:
-        - name: run
-          mountPath: /run/flannel
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      volumes:
-        - name: run
-          hostPath:
-            path: /run/flannel
-        - name: cni
-          hostPath:
-            path: /etc/cni/net.d
-        - name: flannel-cfg
-          configMap:
-            name: kube-flannel-cfg
+      - name: run
+        hostPath:
+          path: /run/flannel
+      - name: cni
+        hostPath:
+          path: /etc/cni/net.d
+      - name: flannel-cfg
+        configMap:
+          name: kube-flannel-cfg

+ 50 - 8
roles/k8s_start_manager/tasks/main.yml

@@ -178,21 +178,63 @@
   retries: 10
   tags: install
 
+- name: Create yaml repo for setup
+  file:
+    path: "{{ yaml_repo_dir_path }}"
+    state: directory
+    mode: "{{ yaml_repo_dir_mode }}"
+  tags: init
+
+- name: Delete Calico yaml file if exists
+  file:
+    path: "{{ calico_yml_file_path }}"
+    state: absent
+  when: hostvars['127.0.0.1']['k8s_cni'] == "calico"
+  tags: init
+
+- name: Copy Calico yaml file
+  copy:
+    src: kube-calico.yaml
+    dest: "{{ calico_yml_file_path }}"
+    owner: root
+    group: root
+    mode: "{{ calico_yml_file_mode }}"
+  when: hostvars['127.0.0.1']['k8s_cni'] == "calico"
+  tags: init
+
 - name: Setup Calico SDN network
-  command: "kubectl apply -f '{{ calico_yml_url }}'"
+  command: "kubectl apply -f '{{ calico_yml_file_path }}'"
   when: hostvars['127.0.0.1']['k8s_cni'] == "calico"
   tags: init
 
-- name: Setup Flannel SDN network
-  command: "kubectl apply -f '{{ flannel_yml_url }}'"
+- name: Delete Flannel yaml file if exists
+  file:
+    path: "{{ flannel_yml_file_path }}"
+    state: absent
   when: hostvars['127.0.0.1']['k8s_cni'] == "flannel"
   tags: init
 
-- name: Create yaml repo for setup
-  file:
-    path: "{{ yaml_repo_dir_path }}"
-    state: directory
-    mode: "{{ yaml_repo_dir_mode }}"
+- name: Copy Flannel yaml file
+  copy:
+    src: kube-flannel.yaml
+    dest: "{{ flannel_yml_file_path }}"
+    owner: root
+    group: root
+    mode: "{{ flannel_yml_file_mode }}"
+  when: hostvars['127.0.0.1']['k8s_cni'] == "flannel"
+  tags: init
+
+- name: Replace flannel network cidr
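+# The bundled kube-flannel.yaml defaults its pod network to 10.244.0.0/16;
+# rewrite it to the pod CIDR configured for this cluster before applying.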
+  replace:
+    path: "{{ flannel_yml_file_path }}"
+    regexp: "10.244.0.0/16"
+    replace: "{{ hostvars['127.0.0.1']['k8s_pod_network_cidr'] }}"
+  when: hostvars['127.0.0.1']['k8s_cni'] == "flannel"
+  tags: init
+
+- name: Setup Flannel SDN network
+  command: "kubectl apply -f '{{ flannel_yml_file_path }}'"
+  when: hostvars['127.0.0.1']['k8s_cni'] == "flannel"
   tags: init
 
 - name: Create service account (K8s dashboard) files

+ 6 - 2
roles/k8s_start_manager/vars/main.yml

@@ -41,6 +41,10 @@ cluster_role_binding_file_dest: /root/k8s/create_clusterRoleBinding.yaml
 
 cluster_role_binding_file_mode: 0655
 
-calico_yml_url: https://docs.projectcalico.org/manifests/calico.yaml
+calico_yml_file_path: /root/k8s/kube-calico.yaml
 
-flannel_yml_url: https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+calico_yml_file_mode: 0644
+
+flannel_yml_file_path: /root/k8s/kube-flannel.yaml
+
+flannel_yml_file_mode: 0644

+ 102 - 0
roles/k8s_start_services/tasks/check_k8s_pods.yml

@@ -0,0 +1,102 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Wait for calico pod to come to ready state
+  command: "kubectl wait --for=condition=ready -n kube-system pod -l k8s-app=calico-node --timeout=10m"
+  changed_when: false
+  register: calico_pod_status
+  failed_when: false
+  when:
+    - "'manager' in group_names"
+    - hostvars['127.0.0.1']['k8s_cni'] == "calico"
+  tags: install
+
+- name: Wait for flannel pod to come to ready state
+  command: "kubectl wait --for=condition=ready -n kube-system pod -l app=flannel --timeout=10m"
+  changed_when: false
+  register: flannel_pod_status
+  failed_when: false
+  when:
+    - "'manager' in group_names"
+    - hostvars['127.0.0.1']['k8s_cni'] == "flannel"
+  tags: install
+
+- name: Wait for nfs-client-provisioner pod to come to ready state
+  command: "kubectl wait --for=condition=ready -n default pod -l app=nfs-client-provisioner --timeout=10m"
+  changed_when: false
+  register: nfs_pod_status
+  failed_when: false
+  when:
+    - "'manager' in group_names"
+    - calico_pod_status is not failed or flannel_pod_status is not failed
+  tags: install
+
+- name: Wait for volcano-scheduler pod to come to ready state
+  command: "kubectl wait --for=condition=ready -n volcano-system pod -l app=volcano-scheduler --timeout=5m"
+  changed_when: false
+  register: volcano_pod_status
+  failed_when: false
+  when:
+    - "'manager' in group_names"
+    - nfs_pod_status is not failed
+  tags: install
+
+- name: Get K8s pods
+  command: kubectl get pods --all-namespaces
+  changed_when: false
+  register: k8s_pods
+  when: "'manager' in group_names"
+  tags: install
+
+- name: Add k8s_pods_status to dummy host
+  add_host:
+    name: "check_k8s_pods"
+    k8s_pods_status: "{{ k8s_pods.stdout }}"
+  tags: install
+
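+# Image pull errors here are typically caused by the Docker Hub pull-rate limit; fail with
+# guidance when no Docker credentials were provided, otherwise log in and retry the pulls.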
+- name: Fail message
+  fail:
+    msg: "{{ docker_pull_limit_msg }}"
+  when:
+    - "'ImagePullBackOff' in hostvars['check_k8s_pods']['k8s_pods_status'] or 'ErrImagePull' in hostvars['check_k8s_pods']['k8s_pods_status']"
+    - not hostvars['127.0.0.1']['docker_username'] and not hostvars['127.0.0.1']['docker_password']
+
+- name: Docker login
+  command: docker login -u {{ hostvars['127.0.0.1']['docker_username'] }} -p {{ hostvars['127.0.0.1']['docker_password'] }}
+  changed_when: true
+  register: docker_login_output
+  failed_when: false
+  when:
+    - "'ImagePullBackOff' in hostvars['check_k8s_pods']['k8s_pods_status'] or 'ErrImagePull' in hostvars['check_k8s_pods']['k8s_pods_status']"
+    - hostvars['127.0.0.1']['docker_username'] or hostvars['127.0.0.1']['docker_password']
+    - "'compute' in group_names"
+  no_log: true
+
+- name: Docker login check
+  fail:
+    msg: "{{ docker_login_fail_msg }}"
+  when:
+    - docker_login_output is failed
+    - "'compute' in group_names"
+
+- name: Pull K8s services docker images
+  command: docker pull {{ item }}
+  with_items: "{{ k8s_docker_images }}"
+  when:
+    - "'ImagePullBackOff' in hostvars['check_k8s_pods']['k8s_pods_status'] or 'ErrImagePull' in hostvars['check_k8s_pods']['k8s_pods_status']"
+    - hostvars['127.0.0.1']['docker_username'] and hostvars['127.0.0.1']['docker_password']
+  register: docker_image_pull_result
+  until: docker_image_pull_result is not failed
+  retries: 5

+ 220 - 0
roles/k8s_start_services/tasks/deploy_k8s_services.yml

@@ -0,0 +1,220 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Include common variables
+  include_vars: ../../slurm_exporter/vars/main.yml
+
+- name: Include k8s_nfs_server_setup variables
+  include_vars: ../../k8s_nfs_server_setup/vars/main.yml
+
+- name: Include powervault_me4_nfs variables
+  include_vars: ../../powervault_me4_nfs/vars/main.yml
+
+- name: Wait for CoreDNS to restart
+  command: kubectl rollout status deployment/coredns -n kube-system  --timeout=5m
+  changed_when: false
+  failed_when: false
+  tags: init
+
+- name: Get K8s pods
+  command: kubectl get pods --all-namespaces
+  changed_when: false
+  register: k8s_pods
+  tags: init
+
+- name: Deploy MetalLB
+  command: "kubectl apply -f '{{ metallb_yaml_url }}'"
+  changed_when: true
+  when: "'metallb' not in k8s_pods.stdout"
+  tags: init
+
+- name: Create MetalLB Setup Config Files
+  copy:
+    src: metal-config.yaml
+    dest: "{{ metallb_config_file_dest }}"
+    owner: root
+    group: root
+    mode: "{{ metallb_config_file_mode }}"
+  tags: init
+
+- name: Create MetalLB Setup Deployment Files
+  copy:
+    src: metallb.yaml
+    dest: "{{ metallb_deployment_file_dest }}"
+    owner: root
+    group: root
+    mode: "{{ metallb_deployment_file_mode }}"
+  tags: init
+
+- name: Deploy MetalLB
+  command: "kubectl apply -f '{{ metallb_deployment_file_dest }}'"
+  changed_when: true
+  when: "'metallb' not in k8s_pods.stdout"
+  tags: init
+
+- name: Create default setup for MetalLB
+  command: "kubectl apply -f '{{ metallb_config_file_dest }}'"
+  changed_when: true
+  when: "'metallb' not in k8s_pods.stdout"
+  tags: init
+
+- name: Start k8s dashboard
+  command: "kubectl create -f '{{ k8s_dashboard_yaml_url }}'"
+  changed_when: true
+  when: "'kubernetes-dashboard' not in k8s_pods.stdout"
+  tags: init
+
+- name: Copy k8s_dashboard_admin.yml file
+  copy:
+    src: k8s_dashboard_admin.yaml
+    dest: "{{ k8s_dashboard_admin_file_dest }}"
+    owner: root
+    group: root
+    mode: "{{ k8s_dashboard_admin_file_mode }}"
+
+- name: Create admin user for K8s dashboard
+  command: "kubectl apply -f {{ k8s_dashboard_admin_file_dest }}"
+  changed_when: true
+
+- name: Helm - add stable repo
+  command: "helm repo add stable '{{ helm_stable_repo_url }}'"
+  changed_when: true
+  tags: init
+
+- name: Helm - add Nvidia k8s-device-plugin (nvdp) repo
+  command: "helm repo add nvdp '{{ nvidia_k8s_device_plugin_repo_url }}'"
+  changed_when: true
+  tags: init
+
+- name: Helm - add Nvidia GPU discovery (nvgfd) repo
+  command: "helm repo add nvgfd '{{ nvidia_gpu_discovery_repo_url }}'"
+  changed_when: true
+  tags: init
+
+- name: Helm - update repo
+  command: helm repo update
+  changed_when: true
+  tags: init
+
+- name: Start NFS Client Provisioner
+  command: "helm install stable/nfs-client-provisioner --set nfs.server='{{ nfs_server }}' --set nfs.path='{{ nfs_path }}' --generate-name"
+  changed_when: true
+  when: "'nfs-client-provisioner' not in k8s_pods.stdout"
+  tags: init
+
+- name: Set NFS-Client Provisioner as DEFAULT StorageClass
+  shell: >
+    kubectl patch storageclasses.storage.k8s.io nfs-client \
+    -p '{ "metadata": { "annotations":{ "storageclass.kubernetes.io/is-default-class":"true" }}}'
+  changed_when: true
+  tags: init
+
+- name: Check if prometheus is installed on the host
+  stat:
+    path: "{{ prometheus_path_on_host }}"
+  register: prometheus_status
+  changed_when: False
+  ignore_errors: yes
+  tags: init
+
+- name: Delete prometheus installed on host if it exists
+  file:
+    path: "{{ prometheus_path_on_host }}"
+    state: absent
+  when: prometheus_status.stat.exists
+  tags: init
+
+- name: Copy the slurm exporter config file
+  copy:
+    src: "{{ slurm_exporter_config_file }}"
+    dest: "{{ slurm_exporter_config_file_path }}"
+    owner: root
+    group: root
+    mode: "{{ slurm_exporter_file_mode }}"
+  tags: init
+
+- name: Fetch the public IP of the host
+  shell: >
+    set -o pipefail && \
+      ip route get 8.8.8.8 | awk '{print $7}'
+  register: public_ip
+  changed_when: False
+  tags: init
+
+- name: Add the host IP to config file
+  replace:
+    path: "{{ slurm_exporter_config_file_path }}{{ slurm_exporter_config_file }}"
+    regexp: "localhost:8080"
+    replace: "{{ public_ip.stdout }}:{{ slurm_exporter_port }}"
+  tags: init
+
+- name: Prometheus deployment
+  command: >
+    helm install stable/prometheus \
+    --set-file extraScrapeConfigs="{{ slurm_exporter_config_file_path }}{{ slurm_exporter_config_file }}" \
+    --set alertmanager.persistentVolume.storageClass=nfs-client,server.persistentVolume.storageClass=nfs-client,server.service.type=LoadBalancer \
+    --generate-name
+  changed_when: true
+  when: "'prometheus' not in k8s_pods.stdout"
+  tags: init
+
+- name: Install MPI Operator
+  command: "kubectl create -f '{{ mpi_operator_yaml_url }}'"
+  changed_when: true
+  when: "'mpi-operator' not in k8s_pods.stdout"
+  tags: init
+
+- name: Install nvidia-device-plugin
+  command: "helm install --version='{{ nvidia_device_plugin_version }}' --generate-name --set migStrategy='{{ mig_strategy }}' nvdp/nvidia-device-plugin"
+  changed_when: true
+  when: "'nvidia-device-plugin' not in k8s_pods.stdout"
+  tags: init
+
+- name: Install GPU Feature Discovery
+  command: "helm install --version='{{ gpu_feature_discovery_version }}' --generate-name --set migStrategy='{{ mig_strategy }}' nvgfd/gpu-feature-discovery"
+  changed_when: true
+  when: "'node-feature-discovery' not in k8s_pods.stdout"
+  tags: init
+
+- name: Deploy Xilinx Device plugin
+  command: "kubectl create -f '{{ fpga_device_plugin_yaml_url }}'"
+  changed_when: true
+  register: fpga_enable
+  when: "'fpga-device-plugin' not in k8s_pods.stdout"
+  tags: init
+
+- name: Deploy ROCm Device plugin
+  command: "kubectl create -f '{{ rocm_device_plugin_yaml_url }}'"
+  changed_when: true
+  register: amd_gpu_enable
+  when: "'amdgpu-device-plugin' not in k8s_pods.stdout"
+  tags: init
+
+- name: Deploy Volcano Scheduling
+  command: "kubectl apply -f '{{ volcano_scheduling_yaml_url }}'"
+  changed_when: true
+  when: "'volcano-system' not in k8s_pods.stdout"
+  tags: init
+
+- name: Install Spark Operator
+  command: "helm repo add spark-operator '{{ spark_operator_repo }}'"
+  changed_when: true
+  tags: init
+
+- name: Install Spark Operator Namespace
+  command: helm install my-release spark-operator/spark-operator --set image.tag={{ operator_image_tag }} --namespace spark-operator --create-namespace
+  changed_when: true
+  when: "'spark-operator' not in k8s_pods.stdout"
+  tags: init

+ 8 - 232
roles/k8s_start_services/tasks/main.yml

@@ -1,4 +1,4 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -13,235 +13,11 @@
 #  limitations under the License.
 ---
 
-- name: Include common variables
-  include_vars: ../../slurm_exporter/vars/main.yml
+- name: Deploy K8s services
+  include_tasks: deploy_k8s_services.yml
+  when: "'manager' in group_names"
+  tags: install
 
-- name: Wait for CoreDNS to restart
-  command: kubectl rollout status deployment/coredns -n kube-system  --timeout=5m
-  changed_when: false
-  failed_when: false
-  tags: init
-
-- name: Get K8s pods
-  command: kubectl get pods --all-namespaces
-  changed_when: false
-  register: k8s_pods
-  tags: init
-
-- name: Deploy MetalLB
-  command: "kubectl apply -f '{{ metallb_yaml_url }}'"
-  changed_when: true
-  when: "'metallb' not in k8s_pods.stdout"
-  tags: init
-
-- name: Create MetalLB Setup Config Files
-  copy:
-    src: metal-config.yaml
-    dest: "{{ metallb_config_file_dest }}"
-    owner: root
-    group: root
-    mode: "{{ metallb_config_file_mode }}"
-  tags: init
-
-- name: Create MetalLB Setup Deployment Files
-  copy:
-    src: metallb.yaml
-    dest: "{{ metallb_deployment_file_dest }}"
-    owner: root
-    group: root
-    mode: "{{ metallb_deployment_file_mode }}"
-  tags: init
-
-- name: Deploy MetalLB
-  command: "kubectl apply -f '{{ metallb_deployment_file_dest }}'"
-  changed_when: true
-  when: "'metallb' not in k8s_pods.stdout"
-  tags: init
-
-- name: Create default setup for MetalLB
-  command: "kubectl apply -f '{{ metallb_config_file_dest }}'"
-  changed_when: true
-  when: "'metallb' not in k8s_pods.stdout"
-  tags: init
-
-- name: Start k8s dashboard
-  command: "kubectl create -f '{{ k8s_dashboard_yaml_url }}'"
-  changed_when: true
-  when: "'kubernetes-dashboard' not in k8s_pods.stdout"
-  tags: init
-
-- name: Copy k8s_dashboard_admin.yml file
-  copy:
-    src: k8s_dashboard_admin.yaml
-    dest: "{{ k8s_dashboard_admin_file_dest }}"
-    owner: root
-    group: root
-    mode: "{{ k8s_dashboard_admin_file_mode }}"
-
-- name: Create admin user for K8s dashboard
-  command: "kubectl apply -f {{ k8s_dashboard_admin_file_dest }}"
-  changed_when: true
-
-- name: Helm - add stable repo
-  command: "helm repo add stable '{{ helm_stable_repo_url }}'"
-  changed_when: true
-  tags: init
-
-- name: Helm - add Nvidia k8s-device-plugin (nvdp) repo
-  command: "helm repo add nvdp '{{ nvidia_k8s_device_plugin_repo_url }}'"
-  changed_when: true
-  tags: init
-
-- name: Helm - add Nvidia GPU discovery (nvgfd) repo
-  command: "helm repo add nvgfd '{{ nvidia_gpu_discovery_repo_url }}'"
-  changed_when: true
-  tags: init
-
-- name: Helm - update repo
-  command: helm repo update
-  changed_when: true
-  tags: init
-
-- name: Start NFS Client Provisioner
-  command: "helm install stable/nfs-client-provisioner --set nfs.server='{{ nfs_server }}' --set nfs.path='{{ nfs_path }}' --generate-name"
-  changed_when: true
-  when: "'nfs-client-provisioner' not in k8s_pods.stdout"
-  tags: init
-
-- name: Set NFS-Client Provisioner as DEFAULT StorageClass
-  shell: >
-    kubectl patch storageclasses.storage.k8s.io nfs-client \
-    -p '{ "metadata": { "annotations":{ "storageclass.kubernetes.io/is-default-class":"true" }}}'
-  changed_when: true
-  tags: init
-
-- name: Check if prometheus is installed on the host
-  stat:
-    path: "{{ prometheus_path_on_host }}"
-  register: prometheus_status
-  changed_when: False
-  ignore_errors: yes
-  tags: init
-
-- name: Delete prometheus installed on host if it exists
-  file:
-    path: "{{ prometheus_path_on_host }}"
-    state: absent
-  when: prometheus_status.stat.exists
-  tags: init
-
-- name: Copy the slurm exporter config file
-  copy:
-    src: "{{ slurm_exporter_config_file }}"
-    dest: "{{ slurm_exporter_config_file_path }}"
-    owner: root
-    group: root
-    mode: "{{ slurm_exporter_file_mode }}"
-  tags: init
-
-- name: Fetch the public IP of the host
-  shell: >
-    set -o pipefail && \
-      ip route get 8.8.8.8 | awk '{print $7}'
-  register: public_ip
-  changed_when: False
-  tags: init
-
-- name: Add the host IP to config file
-  replace:
-    path: "{{ slurm_exporter_config_file_path }}{{ slurm_exporter_config_file }}"
-    regexp: "localhost:8080"
-    replace: "{{ public_ip.stdout }}:{{ slurm_exporter_port }}"
-  tags: init
-
-- name: Prometheus deployment
-  command: >
-    helm install stable/prometheus \
-    --set-file extraScrapeConfigs="{{ slurm_exporter_config_file_path }}{{ slurm_exporter_config_file }}" \
-    --set alertmanager.persistentVolume.storageClass=nfs-client,server.persistentVolume.storageClass=nfs-client,server.service.type=LoadBalancer \
-    --generate-name
-  changed_when: true
-  when: "'prometheus' not in k8s_pods.stdout"
-  tags: init
-
-- name: Install MPI Operator
-  command: "kubectl create -f '{{ mpi_operator_yaml_url }}'"
-  changed_when: true
-  when: "'mpi-operator' not in k8s_pods.stdout"
-  tags: init
-
-- name: Install nvidia-device-plugin
-  command: "helm install --version='{{ nvidia_device_plugin_version }}' --generate-name --set migStrategy='{{ mig_strategy }}' nvdp/nvidia-device-plugin"
-  changed_when: true
-  when: "'nvidia-device-plugin' not in k8s_pods.stdout"
-  tags: init
-
-- name: Install GPU Feature Discovery
-  command: "helm install --version='{{ gpu_feature_discovery_version }}' --generate-name --set migStrategy='{{ mig_strategy }}' nvgfd/gpu-feature-discovery"
-  changed_when: true
-  when: "'node-feature-discovery' not in k8s_pods.stdout"
-  tags: init
-
-- name: Deploy Xilinx Device plugin
-  command: "kubectl create -f '{{ fpga_device_plugin_yaml_url }}'"
-  changed_when: true
-  register: fpga_enable
-  when: "'fpga-device-plugin' not in k8s_pods.stdout"
-  tags: init
-
-- name: Deploy ROCm Device plugin
-  command: "kubectl create -f '{{ rocm_device_plugin_yaml_url }}'"
-  changed_when: true
-  register: amd_gpu_enable
-  when: "'amdgpu-device-plugin' not in k8s_pods.stdout"
-  tags: init
-
-- name: Deploy Volcano Scheduling
-  command: "kubectl apply -f '{{ volcano_scheduling_yaml_url }}'"
-  changed_when: true
-  when: "'volcano-system' not in k8s_pods.stdout"
-  tags: init
-
-- name: Install Spark Operator
-  command: "helm repo add spark-operator '{{ spark_operator_repo }}'"
-  changed_when: true
-  tags: init
-
-- name: Install Spark Operator Namespace
-  command: helm install my-release spark-operator/spark-operator --set image.tag={{ operator_image_tag }} --namespace spark-operator --create-namespace
-  changed_when: true
-  when: "'spark-operator' not in k8s_pods.stdout"
-  tags: init
-
-- name: Wait for k8s pod to come to ready state
-  block:
-    - name: Wait for k8s pod to come to ready state
-      command: "kubectl wait --for=condition=ready -n {{ item.namespace }} pod -l app={{ item.app }} --timeout={{ item.timeout }}"
-      with_items:
-        - { namespace: "default", app: "nfs-client-provisioner", timeout: "10m" }
-        - { namespace: "volcano-system", app: "volcano-scheduler", timeout: "5m" }
-      changed_when: false
-      tags: install
-  rescue:
-    - name: Get K8s pods
-      command: kubectl get pods --all-namespaces
-      changed_when: false
-      register: k8s_pods
-      tags: init
-
-    - name: Fail message
-      fail:
-        msg: "{{ docker_pull_limit_msg }}"
-      when:
-        - "'ImagePullBackOff' in k8s_pods.stdout or 'ErrImagePull' in k8s_pods.stdout"
-        - not hostvars['127.0.0.1']['docker_username'] and not hostvars['127.0.0.1']['docker_password']
-
-    - name: Pull K8s services docker images
-      command: docker pull {{ item }}
-      with_items: "{{ k8s_docker_images }}"
-      when:
-        - "'ImagePullBackOff' in k8s_pods.stdout or 'ErrImagePull' in k8s_pods.stdout"
-        - hostvars['127.0.0.1']['docker_username'] and hostvars['127.0.0.1']['docker_password']
-      register: docker_image_pull_result
-      until: docker_image_pull_result is not failed
-      retries: 5
+- name: Check K8s pods
+  include_tasks: check_k8s_pods.yml
+  tags: install

+ 5 - 0
roles/k8s_start_services/vars/main.yml

@@ -18,6 +18,7 @@ k8s_docker_images:
   - docker.io/calico/cni:v3.19.1
   - docker.io/calico/pod2daemon-flexvol:v3.19.1
   - docker.io/calico/node:v3.19.1
+  - quay.io/coreos/flannel:v0.14.0
   - xilinxatg/xilinx_k8s_fpga_plugin:2020.11.24
   - nvidia/k8s-device-plugin:v0.7.0
   - quay.io/external_storage/nfs-client-provisioner:v3.1.0-k8s1.11
@@ -35,6 +36,10 @@ k8s_docker_images:
   - volcanosh/vc-controller-manager:latest
   - volcanosh/vc-scheduler:latest
   - volcanosh/vc-webhook-manager:latest
+  - mpioperator/mpi-operator:latest
+  - rocm/k8s-device-plugin
+
+docker_login_fail_msg: "Docker login failed! Please check the credentials and re-execute playbook."
 
 docker_pull_limit_msg: "You have reached your docker pull rate limit. Please provide docker credentials in omnia_config.yml and try again"
 

+ 2 - 1
roles/login_server/tasks/install_packages.yml

@@ -24,6 +24,7 @@
     src: "{{ resolv_conf_path }}"
     dest: "{{ temp_resolv_conf_path }}"
     mode: "{{ resolv_file_mode }}"
+    remote_src: yes
 
 - name: Add the domain name in /etc/resolv.conf
   replace:
@@ -39,4 +40,4 @@
     regexp: "# Generated by NetworkManager"
     replace: "# Generated by NetworkManager\nsearch {{ hostvars['127.0.0.1']['domain_name'] }}"
   when:
-    replace_output.msg | length == 0
+    replace_output.msg | length == 0

+ 8 - 2
roles/slurm_workers/tasks/main.yml

@@ -1,4 +1,4 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -114,6 +114,9 @@
   with_items:
     - "{{ groups['compute'] }}"
   when: '"compute" in group_names'
+  delegate_to: "{{ item }}"
+  with_items:
+    - "{{ play_hosts }}"
 
 - name: Add login node core & socket info in slurm config file
   lineinfile:
@@ -127,6 +130,9 @@
   when:
     - hostvars["127.0.0.1"]["login_node_required"]
     - '"login_node" in group_names'
+  delegate_to: "{{ item }}"
+  with_items:
+    - "{{ play_hosts }}"
 
 - name: Update hostnames of compute node when ALL in partition nodes
   replace:
@@ -152,4 +158,4 @@
   fetch:
     src: "{{ slurm_confpth }}"
     dest: "{{ buffer_path }}"
-    flat: true
+    flat: true

+ 901 - 0
test/test_omnia_1.1.yml

@@ -0,0 +1,901 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+#Testcase OMNIA_1.1_US_CRD_TC_001
+# Execute omnia.yml with separate servers for manager,compute,login,nfs node with default parameters
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: localhost
+
+  tasks:
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_001
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i ../inventory
+      changed_when: false
+      tags: TC_001
+
+#Testcase OMNIA_1.1_US_CRD_TC_005
+# Execute omnia.yml with addition of new compute node
+- name: OMNIA_1.1_US_CRD_TC_005
+  hosts: localhost
+
+  tasks:
+  
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_005
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i ../inventory
+      changed_when: false
+      tags: TC_005
+
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_005
+
+    - name: Creating test inventory file 
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+        
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }}          
+          {{ host5 }}
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+
+    - name: Verify if new compute node is added
+      command: ansible --list-hosts compute -i test_inventory.yml
+      changed_when: false
+      register: compute_info
+      tags: TC_005
+
+    - name: Validate compute node
+      assert:
+         that: 
+           - "'{{ host5 }}' in compute_info.stdout"
+         success_msg: "{{ compute_node_success_msg }}"
+         fail_msg: "{{ compute_node_fail_msg }}"
+      tags: TC_005
+
+#Testcase OMNIA_1.1_US_CRD_TC_006
+# Execute omnia.yml after removal of new compute node
+- name: OMNIA_1.1_US_CRD_TC_006
+  hosts: localhost
+
+  tasks:
+
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_006
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i test_inventory.yml
+      changed_when: false
+      tags: TC_006
+
+    - name: Delete one compute node
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }} 
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+      tags: TC_006
+       
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_006
+
+    - name: Verify if one compute node is deleted
+      command:  ansible --list-hosts compute -i test_inventory.yml
+      register: compute_info
+      changed_when: false
+      tags: TC_006
+
+    - name: Validate compute node
+      assert:
+         that: 
+           - "'{{ host5 }}' not in compute_info.stdout"
+         success_msg: "{{ compute_node_del_success_msg }}"
+         fail_msg: "{{ compute_node_del_fail_msg }}"
+      tags: TC_006
+
+    - name: Delete the inventory file
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_006
+
+#Testcase OMNIA_1.1_US_CRD_TC_008
+# Execute Jupyterhub.yml and then Kubeflow.yml
+- name: OMNIA_1.1_US_CRD_TC_008
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_008
+
+    - name: Install Jupyterhub
+      command: ansible-playbook platforms/jupyterhub.yml -i inventory
+      changed_when: false
+      args:
+        chdir: ../
+      tags: TC_008
+
+    - name: Install Kubeflow
+      command: ansible-playbook platforms/kubeflow.yml -i inventory
+      changed_when: false
+      args:
+        chdir: ../
+      tags: TC_008
+
+- name: OMNIA_1.1_US_CRD_TC_008
+  hosts: manager
+  vars_files:
+    - test_vars/test_jupyterhub_vars.yml
+    - test_vars/test_kubeflow_vars.yml
+
+  tasks:
+    - name: Waiting for the pods deployment
+      pause:
+        minutes: 5
+      tags: TC_008
+      
+    - name: Checking K8s services
+      command: kubectl get services
+      register: k8s_services
+      changed_when: false
+      failed_when: True
+      tags: TC_008
+
+    - name: Validating JupyterHub services
+      assert:
+        that:
+          - "'hub' in k8s_services.stdout"
+          - "'proxy-public' in k8s_services.stdout"
+          - "'proxy-api' in k8s_services.stdout"
+        fail_msg: "{{ jupyterhub_services_fail_msg }}"
+        success_msg: "{{ jupyterhub_services_success_msg }}"
+      tags: TC_008
+    
+    - name: Checking all running pods under jupyterhub namespace
+      command: kubectl get pods --namespace jupyterhub --field-selector=status.phase=Running
+      register: jupyterhub_running_pods
+      changed_when: false
+      failed_when: True
+      tags: TC_008
+
+    - name: Validating JupyterHub pods
+      assert:
+        that:
+          - "'hub' in default_jupyterhub_pods.stdout"
+          - "'proxy' in default_jupyterhub_pods.stdout"
+        fail_msg: "{{ jupyterhub_pods_fail_msg }}"
+        success_msg: "{{ jupyterhub_pods_success_msg }}"
+      tags: TC_008
+
+    - name: Checking installed Kubeflow version
+      command: kfctl version
+      register: kfctl_version
+      changed_when: false
+      failed_when: True
+      tags: TC_008
+
+    - name: Checking pods under kubeflow namespace
+      command: kubectl get pods --namespace kubeflow
+      register: kubeflow_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_008
+
+    - name: Checking pods under istio-system namespace
+      command: kubectl get pods --namespace istio-system
+      register: istio_system_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_008
+
+    - name: Validating Kubeflow Installation
+      assert:
+        that:
+          - "'command not found' not in kfctl_version.stdout"
+        fail_msg: "{{ kubeflow_install_fail_msg }}"
+        success_msg: "{{ kubeflow_install_success_msg }}"
+      tags: TC_008
+
+    - name: Validating Kubeflow pods deployment
+      assert:
+        that:
+          - "'Running' in kubeflow_pods.stdout or 'ContainerCreating' in kubeflow_pods.stdout"
+          - "'Running' in istio_system_pods.stdout or 'ContainerCreating' in istio_system_pods.stdout"
+        fail_msg: "{{ kubeflow_pods_deployment_fail_msg }}"
+        success_msg: "{{ kubeflow_pods_deployment_success_msg }}"
+      tags: TC_008
+
+#Testcase OMNIA_1.1_US_CRD_TC_009
+# Execute omnia.yml and reboot all the nodes
+- name: OMNIA_1.1_US_CRD_TC_009
+  hosts: localhost
+  vars_files:
+    - test_vars/test_k8s_common_vars.yml
+    - test_vars/test_slurm_common_vars.yml
+
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_009
+
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_009
+
+
+    - name: Reboot the  nodes
+      command: ansible all -i ../inventory -b -B 1 -P 0 -m shell -a "sleep {{ sleep_time }} && reboot"
+      changed_when: false
+      tags: TC_009
+
+    - name: Waiting for services to restart
+      pause:
+         minutes: "{{ pod_time }}"
+      tags: TC_009
+
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i ../inventory
+      changed_when: false
+      tags: TC_009
+
+
+# Testcase OMNIA_1.1_US_CRD_TC_002
+# Execute omnia.yml with single node scenario (manager, compute and login node on same server)
+- name: OMNIA_1.1_US_CRD_TC_002
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_002
+
+    - name: Creating test inventory file for single node scenario
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host1 }}  
+          
+          [login_node]
+          {{ host1 }}
+           
+          [nfs_node]
+      tags: TC_002
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_002
+
+    - name: Decrpyt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_002
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_one }}"'
+      tags: TC_002
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_invalid }}"'
+      tags: TC_002
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: true'
+      tags: TC_002
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_002
+      
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      failed_when: true
+      changed_when: false
+      register: db_error
+      args: 
+        chdir: ../
+      tags: TC_002
+      
+    - name: Validate mariadb password error
+      assert:
+        that:
+          - '" mariadb_password not given in correct format" not in db_error.stdout'
+        fail_msg: "{{ mariadb_password_error_fail_msg }}"
+        success_msg: "{{ mariadb_password_error_success_msg }}"
+      tags: TC_002
+
+    - name: Delete the inventory file
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_002
+
+# Testcase OMNIA_1.1_US_CRD_TC_003
+# Execute omnia.yml with single node scenario (manager, compute,login,nfs node on same server) 
+- name: OMNIA_1.1_US_CRD_TC_003
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_003
+
+    - name: Creating inventory file for single node scenario
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host1 }}  
+          
+          [login_node]
+          {{ host1 }}
+
+          [nfs_node]
+      tags: TC_003
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_003
+
+    - name: Decrpyt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_003
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_one }}"'
+      tags: TC_003
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_invalid }}"'
+      tags: TC_003
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: true'
+      tags: TC_003
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_003
+      
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      failed_when: true
+      changed_when: false
+      register: db_error
+      args: 
+        chdir: ../
+      tags: TC_003
+      
+    - name: Validate mariadb password error
+      assert:
+        that:
+          - '" mariadb_password not given in correct format" not in db_error.stdout'
+        fail_msg: "{{ mariadb_password_error_fail_msg }}"
+        success_msg: "{{ mariadb_password_error_success_msg }}"
+      tags: TC_003
+
+    - name: Delete the inventory file
+      changed_when: false
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_003
+
+#Testcase OMNIA_1.1_US_CRD_TC_004
+# Execute omnia.yml with separate servers for manager,compute,login,nfs node on new kubernetes version
+- name: OMNIA_1.1_US_CRD_TC_004
+  hosts: localhost
+
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_004
+
+    - name: Creating test inventory
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }}  
+          
+          [login_node]
+          {{ host3 }}
+         
+          [nfs_node]
+
+      tags: TC_004
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_004
+
+    - name: Decrpyt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_two }}"'
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_complex }}"'
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: true'
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_default }}"'
+      tags: TC_004
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_version: ""'
+        replace: 'k8s_version: "{{ k8s_new_version }}"'
+      tags: TC_004
+
+    
+    - name: Execute omnia.yml 
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_004
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i test_inventory.yml
+      changed_when: false
+      tags: TC_004
+
+    - name: Delete the inventory file
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_004
+
+#Testcase OMNIA_1.1_US_CRD_TC_007
+# Execute omnia.yml after redeploying the cluster
+- name: OMNIA_1.1_US_CRD_TC_007
+  hosts: localhost
+
+  tasks:
+    - name: Execute omnia.yml with default input parameters
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_007
+
+    - name: Re Execute omnia.yml
+      command: ansible-playbook omnia.yml -i inventory
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_007
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i ../inventory
+      changed_when: false
+      tags: TC_007
+
+# Testcase OMNIA_1.1_US_CRD_TC_010
+# Execute omnia.yml with same server for manager and compute with slurm first and kubernetes later
+- name: OMNIA_1.1_US_CRD_TC_010
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_010
+
+    - name: Creating test inventory file
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host1 }}  
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+      tags: TC_010
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_010
+
+    - name: Decrpyt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_one }}"'
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_default }}"'
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: true'
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_010
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'ipa_admin_password: ""'
+        replace: 'ipa_admin_password: "{{ ipa_passwd_default }}"' 
+      tags: TC_010
+      
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags kubernetes
+      failed_when: true
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_010
+
+    - name: Re Execute omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags slurm,freeipa
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_010
+    
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i test_inventory.yml
+      changed_when: false
+      tags: TC_010
+
+
+
+# Testcase OMNIA_1.1_US_CRD_TC_011
+# Execute omnia.yml with separate servers for manager,compute,login,nfs node with slurm first and kubernetes later
+- name: OMNIA_1.1_US_CRD_TC_011
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_011
+
+    - name: Creating inventory file for
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }}  
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+          {{ host4 }}
+      tags: TC_011
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_011
+
+    - name: Decrpyt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_one }}"'
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_default }}"'
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required: '
+        replace: 'login_node_required: true'
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_011
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'ipa_admin_password: ""'
+        replace: 'ipa_admin_password: "{{ ipa_passwd_complex }}"'
+      tags: TC_011
+      
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags kubernetes
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_011
+    
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags slurm,freeipa
+      changed_when: false
+      args: 
+        chdir: ../
+      tags: TC_011
+
+    - name: Validate omnia.yml
+      command: ansible-playbook test_omnia_validation.yml -i test_inventory.yml
+      changed_when: false
+      tags: TC_011
+
+    - name: Delete the inventory file
+      changed_when: false
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_011
+
+# Testcase OMNIA_1.1_US_CRD_TC_012
+# Execute omnia.yml with separate servers for manager,compute,login,nfs node with slurm first and kubernetes later
+- name: OMNIA_1.1_US_CRD_TC_012
+  hosts: localhost
+  tasks:
+    - name: Include variable file
+      include_vars: test_vars/test_omnia_1.1_vars.yml
+      tags: TC_012
+
+    - name: Creating test inventory file 
+      copy:
+        dest: "test_inventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+         
+          [manager]
+          {{ host1 }}
+
+          [compute]
+          {{ host2 }}  
+          
+          [login_node]
+          {{ host3 }}
+
+          [nfs_node]
+      tags: TC_012
+
+    - name: Check if omnia config file is encrypted
+      command: cat ../{{ config_filename }}
+      changed_when: false
+      register: config_content
+      no_log: True
+      tags: TC_012
+
+    - name: Decrpyt omnia_config.yml
+      command: >-
+        ansible-vault decrypt ../{{ config_filename }}
+        --vault-password-file ../{{ config_vaultname }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_cni: ""'
+        replace: 'k8s_cni: "{{ k8s_cni_two }}"'
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'mariadb_password: ""'
+        replace: 'mariadb_password: "{{ db_passwd_default }}"'
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'login_node_required:'
+        replace: 'login_node_required: false'
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'k8s_pod_network_cidr: ""'
+        replace: 'k8s_pod_network_cidr: "{{ k8s_pod_network_cidr_other }}"'
+      tags: TC_012
+
+    - name: Edit input parameters in omnia_config.yml
+      replace:
+        path: ../omnia_config.yml
+        regexp: 'ipa_admin_password: ""'
+        replace: 'ipa_admin_password: "{{ ipa_passwd_invalid }}"'
+      tags: TC_012
+      
+    - name: Run omnia.yml
+      command: ansible-playbook omnia.yml -i test/test_inventory.yml --skip-tags kubernetes
+      failed_when: true
+      changed_when: false
+      register: ipa_error
+      args: 
+        chdir: ../
+      tags: TC_012
+      
+    - name: Validate ipa admin password error
+      assert:
+        that:
+          - '" Incorrect format provided for ipa_admin_password" not in ipa_error.stdout'
+        fail_msg: "{{ ipa_password_error_fail_msg }}"
+        success_msg: "{{ ipa_password_error_success_msg }}"
+      tags: TC_012
+
+    - name: Delete the inventory file
+      file:
+        state: absent
+        path: test_inventory.yml
+      tags: TC_012

+ 468 - 0
test/test_omnia_validation.yml

@@ -0,0 +1,468 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+# Testcase OMNIA_1.1_US_CRD_TC_001
+# Execute omnia.yml with separate servers for manager,compute,login,nfs node with default parameters
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: manager, compute
+  vars_files:
+    - test_vars/test_k8s_common_vars.yml
+    - test_vars/test_slurm_common_vars.yml
+  tasks:
+    - name: Checking K8s service status
+      systemd:
+        name: kubelet
+      register: kubelet_service
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating K8s service status
+      assert:
+        that:
+          - kubelet_service.status.ActiveState == 'active'
+        fail_msg: "{{ kubelet_service_fail_msg }}"
+        success_msg: "{{ kubelet_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking munge service status
+      systemd:
+        name: munge
+      register: munge_service
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating munge service status
+      assert:
+        that:
+          - munge_service.status.ActiveState == 'active'
+        fail_msg: "{{ munge_service_fail_msg }}"
+        success_msg: "{{ munge_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: manager
+  vars_files:
+    - test_vars/test_k8s_start_manager_workers_vars.yml
+    - test_vars/test_k8s_start_services_vars.yml
+    - test_vars/test_slurmexporter_vars.yml
+    - test_vars/test_slurm_start_services_vars.yml
+    - test_vars/test_login_server_vars.yml
+    - test_vars/test_slurm_manager_vars.yml
+    - test_vars/test_login_node_vars.yml
+
+  tasks:      
+    - name: Checking kube-system pods
+      command: kubectl get pods --namespace kube-system --field-selector=status.phase=Running
+      register: kube_system_pods
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating controller-manager and scheduler and coreDNS pods status
+      assert:
+        that:
+          - "'kube-scheduler' in kube_system_pods.stdout"
+          - "'kube-controller' in kube_system_pods.stdout"
+        fail_msg: "{{ controller_scheduler_status_fail_msg }}"
+        success_msg: "{{ controller_scheduler_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating coreDNS pods status
+      assert:
+        that:
+          - "'coredns' in kube_system_pods.stdout"
+        fail_msg: "{{ coredns_status_fail_msg }}"
+        success_msg: "{{ coredns_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking all running pods
+      command: kubectl get pods --all-namespaces --field-selector=status.phase=Running
+      register: running_pods
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating Metallb, Prometheus and MPI pods
+      assert:
+        that:
+          - "'metallb' in running_pods.stdout"
+          - "'prometheus' in running_pods.stdout"
+          - "'mpi-operator' in running_pods.stdout"
+        fail_msg: "{{ metallb_prometheus_mpi_pods_fail_msg }}"
+        success_msg: "{{ metallb_prometheus_mpi_pods_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating K8s dashboard
+      assert:
+        that:
+          - "'kubernetes-dashboard' in running_pods.stdout"
+        fail_msg: "{{ kubernetes_dashboard_fail_msg }}"
+        success_msg: "{{ kubernetes_dashboard_success_msg }}"
+      tags: VERIFY_OMNIA_01  
+    
+    - name: Verify slurm exporter status
+      systemd:
+        name: prometheus-slurm-exporter
+      register: slurm_exporter_status
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm exporter service status
+      assert:
+        that:
+          - slurm_exporter_status.status.ActiveState == 'active'
+        fail_msg: "{{ slurm_exporter_service_fail_msg }}"
+        success_msg: "{{ slurm_exporter_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Verify slurm exporter job in k8s services
+      shell: >-
+        export POD_NAME=$(kubectl get pods --namespace 
+        default -l "app=prometheus,component=server" -o jsonpath="{.items[0].metadata.name}")
+      changed_when: true
+      failed_when: false
+      tags: VERIFY_OMNIA_01
+
+    - name: Get pod name
+      shell: echo $POD_NAME
+      register: pod_name
+      changed_when: false
+      tags: VERIFY_OMNIA_01
+
+    - name: Check if prometheus-server is in running state
+      command: kubectl get pods {{ pod_name.stdout }}
+      register: slurm_exporter_pod_status
+      ignore_errors: yes
+      changed_when: false
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm exporter job in k8s services
+      assert:
+        that:
+          - "'Error from server' not in slurm_exporter_pod_status.stdout"
+        fail_msg: "{{ slurm_exporter_job_fail_msg }}"
+        success_msg: "{{ slurm_exporter_job_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking mariadb service status
+      systemd:
+        name: mariadb
+      register: mariadb_service
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating mariadb service status
+      assert:
+        that:
+          - mariadb_service.status.ActiveState == 'active'
+        fail_msg: "{{ mariadb_service_fail_msg }}"
+        success_msg: "{{ mariadb_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking slurmctld service status
+      systemd:
+        name: slurmctld
+      register: slurmctld_service
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking slurmdbd service status
+      systemd:
+        name: slurmdbd
+      register: slurmdbd_service
+      tags: VERIFY_OMNIA_01
+
+    - name: Check if slurm is installed
+      command: sinfo -V
+      register: slurm_version
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating slurmctld service status
+      assert:
+        that:
+          - slurmctld_service.status.ActiveState == 'active'
+        fail_msg: "{{ slurmctld_service_fail_msg }}"
+        success_msg: "{{ slurmctld_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating slurmdbd service status
+      assert:
+        that:
+          - slurmdbd_service.status.ActiveState == 'active'
+        fail_msg: "{{ slurmdbd_service_fail_msg }}"
+        success_msg: "{{ slurmdbd_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm installation
+      assert:
+        that: "'command not found' not in slurm_version.stdout"
+        fail_msg: "{{ slurm_status_fail_msg }}"
+        success_msg: "{{ slurm_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Submit kubernetes job
+      command: kubectl run nginx --image=nginx --restart=Never
+      changed_when: false
+      failed_when: false
+      tags: VERIFY_OMNIA_01
+
+    - name: Check submitted kubernetes job status
+      command: kubectl get pod nginx
+      register: kubo_job
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate kubernetes job submission
+      assert:
+        that: "'pods nginx not found' not in kubo_job.stdout"
+        fail_msg: "{{ kubernetes_job_status_fail_msg }}"
+        success_msg: "{{ kubernetes_job_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+         
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: compute
+  vars_files:
+    - test_vars/test_slurm_workers_vars.yml
+  tasks:    
+    - name: Check if slurm is installed
+      command: sinfo -V
+      register: slurm_version
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking slurmd service status
+      service:
+        name: slurmd.service
+      register: slurmd_service
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm installation
+      assert:
+        that: "'command not found' not in slurm_version.stdout"
+        fail_msg: "{{ slurm_status_fail_msg }}"
+        success_msg: "{{ slurm_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Validating slurmd service status
+      assert:
+        that:
+          - slurmd_service.status.ActiveState == 'active'
+        fail_msg: "{{ slurmd_service_fail_msg }}"
+        success_msg: "{{ slurmd_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: manager, login_node
+  vars_files:
+    - test_vars/test_login_common_vars.yml
+    
+  tasks:    
+    - name: Checking installed Freeipa version
+      command: ipa --version
+      register: ipa_version
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating Freeipa Installation
+      assert:
+        that:
+          - "'command not found' not in ipa_version.stdout"
+        fail_msg: "{{ ipa_install_fail_msg }}"
+        success_msg: "{{ ipa_install_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Start and enable firewalld
+      service:
+        name: firewalld
+        state: started
+        enabled: yes
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking firewalld open ports on manager/login node
+      command: firewall-cmd --list-ports
+      changed_when: false
+      register: login_common_firewalld_ports
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating firewalld open ports on manager/login node
+      assert:
+        that:
+          - "'80/tcp' in login_common_firewalld_ports.stdout"
+          - "'443/tcp' in login_common_firewalld_ports.stdout"
+          - "'389/tcp' in login_common_firewalld_ports.stdout"
+          - "'636/tcp' in login_common_firewalld_ports.stdout"
+          - "'88/tcp' in login_common_firewalld_ports.stdout"
+          - "'464/tcp' in login_common_firewalld_ports.stdout"
+          - "'88/udp' in login_common_firewalld_ports.stdout"
+          - "'464/udp' in login_common_firewalld_ports.stdout"
+          - "'53/tcp' in login_common_firewalld_ports.stdout"
+          - "'53/udp' in login_common_firewalld_ports.stdout"
+          - "'123/udp' in login_common_firewalld_ports.stdout"
+          - "'7389/tcp' in login_common_firewalld_ports.stdout"
+        fail_msg: "{{ login_common_ports_status_fail_msg }}"
+        success_msg: "{{ login_common_ports_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Stop and disable firewalld
+      service:
+        name: firewalld
+        state: stopped
+        enabled: no
+      tags: VERIFY_OMNIA_01
+
+    - name: Check Freeipa server/client configuration
+      command: ipa help topics
+      register: ipa_config
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating Freeipa server/client Configuration
+      assert:
+        that:
+          - "'command not found' not in ipa_config.stdout"
+        fail_msg: "{{ ipa_configuration_fail_msg }}"
+        success_msg: "{{ ipa_configuration_success_msg }}"
+      failed_when: false
+      tags: VERIFY_OMNIA_01
+
+    - name: Ensure host is present
+      shell: echo "{{ ipa_admin_password }}" | kinit admin
+      register: authen
+      changed_when: false
+      ignore_errors: true
+      tags: VERIFY_OMNIA_01
+   
+    - name: Validate admin user in ipa server/client
+      assert:
+        that:
+          - authen.rc == 0
+        fail_msg: "{{ admin_user_authentication_status_fail_msg }}"
+        success_msg: "{{ admin_user_authentication_status_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: login_node
+  gather_facts: false
+  vars_files:
+    - test_vars/test_login_node_vars.yml
+    - test_vars/test_slurm_workers_vars.yml
+    
+  tasks: 
+    - name: Checking slurmd service status
+      systemd:
+        name: slurmd.service
+      register: slurmd_service
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validating slurmd service status
+      assert:
+        that:
+          - slurmd_service.status.ActiveState == 'active'
+        fail_msg: "{{ slurmd_service_fail_msg }}"
+        success_msg: "{{ slurmd_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Submit slurm jobs
+      command: srun --nodes "{{ nodes }}" --ntasks-per-node "{{ ntasks }}" --partition normal hostname
+      register: job_status
+      changed_when: false
+      ignore_errors: True
+      tags: VERIFY_OMNIA_01
+
+    - name: Validate slurm job submission
+      assert:
+        that: "'compute.ipa.test' in job_status.stdout"
+        fail_msg: "{{ slurm_job_status_fail_msg }}"
+        success_msg: "{{ slurm_job_status_success_msg }}"
+      failed_when: false
+      tags: VERIFY_OMNIA_01
+
+- name: OMNIA_1.1_US_CRD_TC_001
+  hosts: nfs_node
+  vars_files:
+    - test_vars/test_nfs_node_vars.yml
+   
+  tasks:
+      
+    - name: Checking rpcbind service status
+      systemd:
+        name: rpcbind
+      register: rpcbind_service
+      tags: VERIFY_OMNIA_01
+     
+    - name: Validating rpcbind service status
+      assert:
+        that:
+          - rpcbind_service.status.ActiveState == 'active'
+        fail_msg: "{{ rpcbind_service_fail_msg }}"
+        success_msg: "{{ rpcbind_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Checking nfs-server service status
+      systemd:
+        name: nfs-server
+      register: nfs_server_service
+      tags: VERIFY_OMNIA_01
+     
+    - name: Validating nfs-server service status
+      assert:
+        that:
+          - nfs_server_service.status.ActiveState == 'active'
+        fail_msg: "{{ nfs_server_service_fail_msg }}"
+        success_msg: "{{ nfs_server_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking nfs-lock service status
+      systemd:
+        name: nfs-lock
+      register: nfs_lock_service
+      tags: VERIFY_OMNIA_01
+     
+    - name: Validating nfs-lock service status
+      assert:
+        that:
+          - nfs_lock_service.status.ActiveState == 'active'
+        fail_msg: "{{ nfs_lock_service_fail_msg }}"
+        success_msg: "{{ nfs_lock_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Checking nfs-idmap service status
+      systemd:
+        name: nfs-idmap
+      register: nfs_idmap_service
+      tags: VERIFY_OMNIA_01
+     
+    - name: Validating nfs-idmap service status
+      assert:
+        that:
+          - nfs_idmap_service.status.ActiveState == 'active'
+        fail_msg: "{{ nfs_idmap_service_fail_msg }}"
+        success_msg: "{{ nfs_idmap_service_success_msg }}"
+      tags: VERIFY_OMNIA_01
+      
+    - name: Check if nfs server setup is complete
+      command: exportfs -v
+      changed_when: false
+      register: nfs_share
+      tags: VERIFY_OMNIA_01
+      
+    - name: Validate nfs server setup
+      assert:
+        that: "'{{ nfs_dir }}' in nfs_share.stdout"
+        fail_msg: "{{ nfs_server_fail_msg }}"
+        success_msg: "{{ nfs_server_success_msg }}"
+      tags: VERIFY_OMNIA_01      
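
These verification plays address the compute, manager, login_node, and nfs_node inventory groups. A minimal YAML inventory sketch that would satisfy those host patterns is shown below; the host names and layout are illustrative assumptions, not part of the test suite.

    all:
      children:
        compute:
          hosts:
            compute-node-01:     # placeholder host name
        manager:
          hosts:
            manager-node-01:     # placeholder host name
        login_node:
          hosts:
            login-node-01:       # placeholder host name
        nfs_node:
          hosts:
            nfs-node-01:         # placeholder host name

With such an inventory in place, these checks can be run selectively by tag, for example: ansible-playbook <playbook> -i inventory --tags VERIFY_OMNIA_01.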

+ 28 - 0
test/test_vars/test_login_common_vars.yml

@@ -0,0 +1,28 @@
+
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+ipa_admin_password: "omnia@1234"
+login_common_ports_status_fail_msg: "Required TCP/UDP ports are not open on the manager/login node"
+login_common_ports_status_success_msg: "Required TCP/UDP ports are open on the manager/login node"
+
+ipa_install_fail_msg: "FreeIPA is not installed"
+ipa_install_success_msg: "FreeIPA is installed"
+
+ipa_configuration_fail_msg: "FreeIPA is not configured properly"
+ipa_configuration_success_msg: "FreeIPA is configured properly"
+
+admin_user_authentication_status_fail_msg: "Admin user authentication failed"
+admin_user_authentication_status_success_msg: "Admin user authenticated successfully"

+ 31 - 0
test/test_vars/test_login_node_vars.yml

@@ -0,0 +1,31 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+ipa_client_packages:
+  - bind-utils
+  - freeipa-client
+  - ipa-admintools
+
+freeipa_client_packages_status_success_msg: "FreeIPA client packages are installed"
+freeipa_client_packages_status_fail_msg: "FreeIPA client packages are not installed"
+
+nodes: "1"
+ntasks: "1"
+
+slurm_job_status_fail_msg: "Slurm jobs execution failed"
+slurm_job_status_success_msg: "Slurm jobs executed and running successfully"
+
+kubernetes_job_status_fail_msg: "Kubernetes job failed"
+kubernetes_job_status_success_msg: "Kubernetes job is running successfully"
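
The nodes and ntasks values above are substituted into the srun command used by the slurm job submission check, so with these defaults the templated command renders to roughly:

    srun --nodes 1 --ntasks-per-node 1 --partition normal hostname

Larger values can presumably be supplied at run time without editing this file, for example with ansible-playbook ... -e "nodes=2 ntasks=4", provided the target partition has the capacity.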

+ 25 - 0
test/test_vars/test_login_server_vars.yml

@@ -0,0 +1,25 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+ipa_server_packages:
+  - bind
+  - bind-dyndb-ldap
+  - ipa-server-dns
+  - freeipa-server
+
+
+freeipa_server_packages_status_success_msg: "FreeIPA server packages are installed"
+freeipa_server_packages_status_fail_msg: "FreeIPA server packages are not installed"
+
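
The ipa_server_packages list and the status messages above imply a per-package presence check. A hedged sketch of how such a check could be written with the standard package_facts and assert modules follows; the task wording is illustrative and is not taken from the playbook above.

    - name: Gather installed package facts
      package_facts:
        manager: auto

    - name: Validating FreeIPA server package installation
      assert:
        that: item in ansible_facts.packages
        fail_msg: "{{ freeipa_server_packages_status_fail_msg }}"
        success_msg: "{{ freeipa_server_packages_status_success_msg }}"
      loop: "{{ ipa_server_packages }}"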

+ 34 - 0
test/test_vars/test_nfs_node_vars.yml

@@ -0,0 +1,34 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+nfs_dir: "/me4_k8s_nfs"
+
+nfs_dir_fail_msg: "NFS share directory is not present"
+nfs_dir_success_msg: "NFS share directory is present"
+
+rpcbind_service_fail_msg: "Rpcbind service is not running"
+rpcbind_service_success_msg: "Rpcbind service is running"
+
+nfs_server_service_fail_msg: "nfs-server service is not running"
+nfs_server_service_success_msg: "nfs-server service is running"
+
+nfs_lock_service_fail_msg: "nfs-lock service is not running"
+nfs_lock_service_success_msg: "nfs-lock service is running"
+
+nfs_idmap_service_fail_msg: "nfs-idmap service is not running"
+nfs_idmap_service_success_msg: "nfs-idmap service is running"
+
+nfs_server_success_msg: "nfs server is setup successfully"
+nfs_server_fail_msg: "nfs server setup is unsuccessful"
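
The nfs_dir value is matched against the exportfs -v output in the play above, so the directory must appear in the NFS server's export table for the check to pass. A hedged sketch of tasks that would publish such an export is shown below; the export options are assumptions for illustration, not the options Omnia actually configures.

    - name: Export the share expected by the verification play (illustrative)
      lineinfile:
        path: /etc/exports
        line: "/me4_k8s_nfs *(rw,sync,no_root_squash)"

    - name: Reload the NFS export table (illustrative)
      command: exportfs -ra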

+ 50 - 0
test/test_vars/test_omnia_1.1_vars.yml

@@ -0,0 +1,50 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+host1: 192.168.0.17
+host2: 192.168.0.19
+host3: 192.168.0.16
+host4: 192.168.0.22
+host5: 192.168.0.18
+
+config_filename: "omnia_config.yml"
+config_vaultname: ".omnia_vault_key"
+
+file_permission: "0644"
+db_passwd_invalid: "omnia123-"
+db_passwd_complex: "omnIaFD@123gn)opk"
+db_passwd_default: "password"
+k8s_cni_one: "flannel"
+k8s_cni_two: "calico"
+k8s_pod_network_cidr_default: "10.244.0.0/16"
+k8s_pod_network_cidr_other: "192.168.0.0/16"
+k8s_new_version: "1.19.3"
+ipa_passwd_invalid: "Omnia12-3"
+ipa_passwd_default: "omnia1234"
+ipa_passwd_complex: "Omnia@De9$123%"
+sleep_time: 5
+pod_time: 10
+
+compute_node_success_msg: "New compute node was successfully added to the cluster"
+compute_node_fail_msg: "New compute node could not be added to the cluster"
+
+compute_node_del_success_msg: "New compute node was successfully deleted from the cluster"
+compute_node_del_fail_msg: "New compute node could not be deleted from the cluster"
+
+ipa_password_error_fail_msg: "Invalid ipa admin password value was accepted"
+ipa_password_error_success_msg: "Invalid ipa admin password value was rejected as expected"
+
+mariadb_password_error_fail_msg: "Invalid mariadb password value was accepted"
+mariadb_password_error_success_msg: "Invalid mariadb password value was rejected as expected"
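
The *_passwd_invalid and *_passwd_complex values serve as negative and positive inputs for password validation. A hedged sketch of the kind of assertion they would typically be fed into is shown below; the length and character rules are assumptions for illustration, not Omnia's actual password policy.

    - name: Example password validation (illustrative)
      assert:
        that:
          - db_passwd_complex | length > 7
          - db_passwd_complex is not search("[-,. '<>]")
        fail_msg: "Password does not meet the assumed complexity rules"
        success_msg: "Password meets the assumed complexity rules"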