Ver código fonte

Merge branch 'devel' into awx_one_touch

Lucas A. Wilson 3 anos atrás
pai
commit
3cf371226a
54 arquivos alterados com 3092 adições e 389 exclusões
  1. 6 3
      control_plane/collect_device_info.yml
  2. 4 3
      control_plane/collect_node_info.yml
  3. 31 28
      control_plane/input_params/base_vars.yml
  4. 10 7
      control_plane/input_params/idrac_tools_vars.yml
  5. 2 4
      control_plane/input_params/idrac_vars.yml
  6. 369 0
      control_plane/roles/collect_device_info/files/create_inventory.yml
  7. 30 13
      control_plane/roles/collect_device_info/tasks/main.yml
  8. 36 0
      control_plane/roles/collect_device_info/vars/main.yml
  9. 21 17
      control_plane/roles/collect_node_info/files/add_host.yml
  10. 99 22
      control_plane/roles/collect_node_info/files/create_inventory.yml
  11. 42 52
      control_plane/roles/collect_node_info/tasks/main.yml
  12. 12 3
      control_plane/roles/collect_node_info/vars/main.yml
  13. 8 34
      control_plane/roles/control_plane_common/tasks/fetch_base_inputs.yml
  14. 1 1
      control_plane/roles/control_plane_common/tasks/password_config.yml
  15. 65 3
      control_plane/roles/control_plane_common/tasks/verify_omnia_params.yml
  16. 12 1
      control_plane/roles/control_plane_common/vars/main.yml
  17. 6 11
      control_plane/roles/control_plane_ib/files/Dockerfile
  18. 0 48
      control_plane/roles/control_plane_ib/files/dhcpd.conf
  19. 0 43
      control_plane/roles/control_plane_ib/files/infiniband_inventory_creation.yml
  20. 2 2
      control_plane/roles/control_plane_ib/files/k8s_infiniband.yml
  21. 2 2
      control_plane/roles/control_plane_ib/files/temp_dhcp.template
  22. 4 10
      control_plane/roles/control_plane_ib/tasks/check_prerequisites.yml
  23. 2 2
      control_plane/roles/control_plane_ib/tasks/configure_infiniband_container.yml
  24. 1 1
      control_plane/roles/control_plane_ib/tasks/dhcp_configure.yml
  25. 3 8
      control_plane/roles/control_plane_ib/tasks/main.yml
  26. 0 1
      control_plane/roles/control_plane_ib/vars/main.yml
  27. 2 2
      control_plane/roles/powervault_me4/tasks/volume.yml
  28. 2 0
      control_plane/test/test_ib_inventory
  29. 101 0
      control_plane/test/test_ib_mtu.yml
  30. 333 0
      control_plane/test/test_infiniband_config.yml
  31. 156 0
      control_plane/test/test_infiniband_facts.yml
  32. 754 0
      control_plane/test/test_inventory.yml
  33. 147 0
      control_plane/test/test_inventory_validation.yml
  34. 150 16
      control_plane/test/test_powervault.yml
  35. 56 0
      control_plane/test/test_vars/test_infiniband_vars.yml
  36. 70 0
      control_plane/test/test_vars/test_inventory_vars.yml
  37. 4 1
      control_plane/test/test_vars/test_powervault_vars.yml
  38. 3 3
      control_plane/tools/provision_report.yml
  39. 7 2
      control_plane/tools/roles/fetch_password/tasks/main.yml
  40. 9 4
      control_plane/tools/roles/hpc_cluster_report/tasks/main.yml
  41. 8 14
      control_plane/tools/roles/hpc_cluster_report/templates/provision_host_report.j2
  42. 1 1
      control_plane/tools/roles/idrac_2fa/tasks/configure_smtp.yml
  43. 9 3
      control_plane/tools/roles/idrac_2fa/tasks/validate_2fa_vars.yml
  44. 64 14
      omnia.yml
  45. 4 2
      roles/k8s_nfs_client_setup/tasks/main.yml
  46. 1 1
      roles/k8s_nfs_client_setup/vars/main.yml
  47. 14 4
      roles/k8s_start_services/tasks/deploy_k8s_services.yml
  48. 3 3
      roles/k8s_start_services/vars/main.yml
  49. 21 0
      roles/powervault_me4_nfs/tasks/main.yml
  50. 79 0
      roles/powervault_me4_nfs/tasks/me4_nfs_server_setup.yml
  51. 111 0
      roles/powervault_me4_nfs/tasks/mount_me4_partitions.yml
  52. 137 0
      roles/powervault_me4_nfs/tasks/nfs_node_configure.yml
  53. 40 0
      roles/powervault_me4_nfs/tasks/nfs_volume.yml
  54. 38 0
      roles/powervault_me4_nfs/vars/main.yml

+ 6 - 3
control_plane/collect_device_info.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,9 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-- name: Dynamic Inventory
+
+- name: Collect control_plane device inventory
   hosts: localhost
   connection: local
-  gather_facts: no
+  gather_facts: false
   roles:
     - collect_device_info
+
+- import_playbook: "{{ playbook_dir }}/roles/collect_device_info/files/create_inventory.yml"

+ 4 - 3
control_plane/collect_node_info.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,9 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-- name: Dynamic Inventory
+
+- name: Collect control_plane host inventory
   hosts: localhost
   connection: local
-  gather_facts: no
+  gather_facts: false
   roles:
     - collect_node_info

+ 31 - 28
control_plane/input_params/base_vars.yml

@@ -12,9 +12,6 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 ---
-##All variables except mapping_file_path are mandatory##
-
-###default###
 
 # Path to directory hosting ansible config file (ansible.cfg file)
 # Default value is /etc/ansible
@@ -26,13 +23,13 @@ ansible_conf_file_path: /etc/ansible
 # It accepts boolean values "true" or "false". 
 # By default its value is "false".
 # If ethernet switch support is needed set this to "true"
-ethernet_switch_support: false
+ethernet_switch_support: true
 
 # This variable is used to enable infiniband switch configuration
 # It accepts boolean values "true" or "false". 
 # By default its value is "false".
 # If infiniband configuration is needed set this to "true"
-ib_switch_support: false
+ib_switch_support: true
 
 # This variable is used to enable powervault configuration
 # It accepts boolean values "true" or "false". 
@@ -89,7 +86,15 @@ language: "en-US"
 # The iso file should be CentOS7-2009-minimal edition.
 # Other iso files are not supported.
 # Mandatory value required
-iso_file_path: ""
+iso_file_path: "/root/CentOS-7-x86_64-Minimal-2009.iso"
+
+# Default lease time that will be used by dhcp
+# Its unit is seconds
+# Min: 21600 seconds
+# Default: 86400 seconds
+# Max: 31536000 seconds
+# Mandatory value required
+default_lease_time: "86400"
 
 ### Usage: control_plane_device ###
 
@@ -102,13 +107,13 @@ mngmnt_network_nic: "eno1"
 # The dhcp range for assigning the IPv4 address
 # Example: 172.17.0.1
 # Mandatory value required
-mngmnt_network_dhcp_start_range: ""
-mngmnt_network_dhcp_end_range: ""
+mngmnt_network_dhcp_start_range: "172.19.0.100"
+mngmnt_network_dhcp_end_range: "172.19.0.200"
 
-# The mapping file consists of the MAC address and its respective IP address and hostname.
-# The format of mapping file should be MAC,hostname,IP and must be a CSV file.
-# Eg: xx:yy:zz:aa:bb,server,172.17.0.5
-# A template for mapping file exists in omnia/examples and is named as mapping_file.csv.
+# The mapping file consists of the MAC address and its respective IP address.
+# The format of mapping file should be MAC,IP and must be a CSV file.
+# Eg: xx:yy:zz:aa:bb,172.17.0.5
+# A template for mapping file exists in omnia/examples and is named as mapping_device_file.csv.
 # This depicts the path where user has kept the mapping file for DHCP configurations.
 mngmnt_mapping_file_path: ""
 
@@ -122,13 +127,18 @@ host_network_nic: "eno3"
 # The dhcp range for assigning the IPv4 address
 # Example: 172.17.0.1
 # Mandatory value required
-host_network_dhcp_start_range: ""
-host_network_dhcp_end_range: ""
-
-# The mapping file consists of the MAC address and its respective IP address and hostname.
-# The format of mapping file should be MAC,hostname,IP and must be a CSV file.
-# Eg: xx:yy:zz:aa:bb,server,172.17.0.5
-# A template for mapping file exists in omnia/examples and is named as mapping_file.csv.
+host_network_dhcp_start_range: "172.17.0.100"
+host_network_dhcp_end_range: "172.17.0.200"
+
+# The mapping file consists of the MAC address and its respective IP address and Hostname and Component_role(if any)
+# 2 Formats are supported for host mapping files:
+# If user wants one-touch provisioning of omnia cluster, format of csv: MAC,Hostname,IP,Component_role
+# Component_role can take values: manager, compute, login_node, nfs_node
+# Ex. xx:yy:zz:aa:bb,server,172.17.0.5,manager.
+# A template for the mapping file exists in omnia/examples and is named as host_mapping_file_one_touch.csv
+# If user wants to skip one touch deployment and only static IP assignment is required, format of csv: MAC,Hostname,IP
+# Ex. xx:yy:zz:aa:bb,server,172.17.0.5
+# A template for the mapping file exists in omnia/examples and is named as host_mapping_file_os_provisioning.csv
 # This depicts the path where user has kept the mapping file for DHCP configurations.
 host_mapping_file_path: ""
 
@@ -141,12 +151,5 @@ ib_network_nic: "ib0"
 
 # The dhcp range for assigning the IPv4 address
 # Example: 172.17.0.1
-ib_network_dhcp_start_range: ""
-ib_network_dhcp_end_range: ""
-
-# The mapping file consists of the MAC address and its respective IP address and hostname.
-# The format of mapping file should be MAC,hostname,IP and must be a CSV file.
-# Eg: xx:yy:zz:aa:bb,server,172.17.0.5
-# A template for mapping file exists in omnia/examples and is named as mapping_file.csv.
-# This depicts the path where user has kept the mapping file for DHCP configurations.
-ib_mapping_file_path: ""
+ib_network_dhcp_start_range: "172.25.0.100"
+ib_network_dhcp_end_range: "172.25.0.200"

+ 10 - 7
control_plane/input_params/idrac_tools_vars.yml

@@ -35,18 +35,21 @@ ipv4_static_dns2: ""
 # Mandatory value required
 smtp_server_ip: ""
 
-# Username used for SMTP
+# Email address used for enabling 2FA
+# Mandatory value required
+use_email_address_2fa: ""
+
+# SMTP authentication disabled by default
+# If enabled provide smtp username and password
 # Mandatory value required
+smtp_authentication: "disabled"
+
+# Username used for SMTP
 smtp_username: ""
    
 # Password used for SMTP
-# Mandatory value required    
 smtp_password: ""
 
-# Email address used for enabling 2FA
-# Mandatory value required
-use_email_address_2fa: ""
-
 
 ### Usage: idrac_ldap ###
 
@@ -108,4 +111,4 @@ role_group1_dn: ""
 # Supported options are Administrator, Operator, ReadOnly
 # By default role_group1_privilege will be Administrator
 # Mandatory value required
-role_group1_privilege: "Administrator"
+role_group1_privilege: "Administrator"

+ 2 - 4
control_plane/input_params/idrac_vars.yml

@@ -23,9 +23,7 @@ idrac_system_profile: "Performance"
 # Boolean value indicating whether OMNIA should perform firmware update or not
 # It takes values "true" or "false" indicating required and not required cases respectively.
 # Default value is "true"
-# firmware_update_required should be 'false' now as there is bug in DSU & OMAM modules and firmware updates dependent on that.
-# It will be updated to 'true' once DSU and OMAM fix the bugs
-firmware_update_required: false
+firmware_update_required: true
 
 # This is the list of poweredge server models
 # The firmware updates will be downloaded only for the below list of models
@@ -71,4 +69,4 @@ two_factor_authentication: "disabled"
 # If required it can be "enabled"
 # Update 2FA input parameters in idrac_tools_vars.yml if two_factor_authentication is enabled
 # Command to edit idrac_tools_vars.yml: ansible-vault edit idrac_tools_vars.yml --vault-password-file .idrac_vault_key
-ldap_directory_services: "disabled"
+ldap_directory_services: "disabled"

+ 369 - 0
control_plane/roles/collect_device_info/files/create_inventory.yml

@@ -0,0 +1,369 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+
+# This role will not group the devices if user provides invalid credentials
+
+- name: Create inventory in awx
+  hosts: device_inventory
+  connection: local
+  gather_facts: false
+  tasks:
+    - name: Include collect_device_info vars
+      include_vars: "{{ playbook_dir }}/../vars/main.yml"
+      run_once: true
+
+    - name: Include variable file base_vars.yml
+      include_vars: "{{ base_vars_file }}"
+      run_once: true
+
+    - name: Check if tower_config_file file is encrypted
+      command: cat "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      changed_when: false
+      no_log: true
+      register: tower_config_content
+      run_once: true
+
+    - name: Decrypt tower_config_file
+      command: >-
+        ansible-vault decrypt "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+        --vault-password-file "{{ playbook_dir }}/../../webui_awx/files/.tower_vault_key"
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in tower_config_content.stdout"
+      run_once: true
+
+    - name: Change file permissions
+      file:
+        path: "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+        mode: "{{ file_perm }}"
+      run_once: true
+
+    - name: Fetch awx host
+      command: grep "host:" "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      register: fetch_awx_host
+      changed_when: false
+      run_once: true
+
+    - name: Fetch awx username
+      command: grep "username:" "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      register: fetch_awx_username
+      changed_when: false
+      run_once: true
+      no_log: true
+
+    - name: Fetch awx password
+      command: grep "password:" "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      register: fetch_awx_password
+      changed_when: false
+      run_once: true
+      no_log: true
+
+    - name: Set awx variables
+      set_fact:
+        awx_host: "{{ fetch_awx_host.stdout | regex_replace('host: ','') }}"
+        awx_username: "{{ fetch_awx_username.stdout | regex_replace('username: ','') }}"
+        awx_password: "{{ fetch_awx_password.stdout | regex_replace('password: ','') }}"
+      no_log: true
+
+    - name: Encrypt tower_config_file
+      command: >-
+        ansible-vault encrypt "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+        --vault-password-file "{{ playbook_dir }}/../../webui_awx/files/.tower_vault_key"
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in tower_config_content.stdout"
+      run_once: true
+
+    - name: Change file permissions
+      file:
+        path: "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+        mode: "{{ file_perm }}"
+      run_once: true
+
+    - name: Check if {{ login_vars_file }} file is encrypted
+      command: cat {{ login_vars_file }}
+      changed_when: false
+      no_log: true
+      register: config_content
+      run_once: true
+
+    - name: Decrypt {{ login_vars_file }}
+      command: >-
+        ansible-vault decrypt {{ login_vars_file }}
+        --vault-password-file {{ login_vault_file }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      changed_when: false
+      run_once: true
+
+    - name: Include variable file {{ login_vars_file }}
+      include_vars: "{{ login_vars_file }}"
+      no_log: true
+      run_once: true
+
+    - name: Encrypt {{ login_vars_file }}
+      command: >-
+        ansible-vault encrypt {{ login_vars_file }}
+        --vault-password-file {{ login_vault_file }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      run_once: true
+
+    - name: Initialize variables
+      set_fact:
+        idrac_inventory_status: false
+        ethernet_inventory_status: false
+        ib_inventory_status: false
+        powervault_me4_status: false
+
+    - name: idrac_inventory validation tasks
+      block:
+        - name: Fetch the hosts in idrac_inventory
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts list --inventory idrac_inventory -f human --filter "name"
+          changed_when: false
+          no_log: true
+          run_once: true
+          register: idrac_hosts
+
+        - name: Assert idrac IP
+          dellemc.openmanage.idrac_system_info:
+            idrac_ip: "{{ inventory_hostname }}"
+            idrac_user: "{{ idrac_username }}"
+            idrac_password: "{{ idrac_password }}"
+          register: idrac_info
+          when: inventory_hostname not in idrac_hosts.stdout
+
+        - name: Set idrac_inventory_status
+          set_fact:
+            idrac_inventory_status: true
+          when:
+            - inventory_hostname not in idrac_hosts.stdout
+            - idrac_search_key in idrac_info.system_info.iDRAC[0].ProductInfo
+      rescue:
+        - name: Failed while adding device to idrac_inventory
+          debug:
+            msg: "{{ idrac_inventory_fail_msg }}"
+      when: not idrac_inventory_status
+
+    - name: Add host to awx idrac_inventory
+      block:
+        - name: Add the host to awx idrac_inventory if not present
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts create --name {{ inventory_hostname }} --inventory idrac_inventory
+          changed_when: true
+          no_log: true
+      rescue:
+        - name: Failed while adding device to idrac_inventory
+          debug:
+            msg: "{{ idrac_inventory_fail_msg }}"
+      when: idrac_inventory_status
+
+    - name: ethernet_inventory validation tasks
+      block:
+        - name: Fetch the hosts in ethernet inventory
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts list --inventory ethernet_inventory -f human --filter "name"
+          changed_when: false
+          no_log: true
+          run_once: true
+          register: ethernet_switches
+
+        - name: Assert ethernet switch
+          dellos10_command:
+            provider:
+              host: "{{ inventory_hostname }}"
+              username: "{{ ethernet_switch_username }}"
+              password: "{{ ethernet_switch_password }}"
+            commands: ['show version']
+          when: inventory_hostname not in ethernet_switches.stdout
+          register: dellswitch_info
+          no_log: true
+
+        - name: Set ethernet_inventory_status
+          set_fact:
+            ethernet_inventory_status: true
+          when:
+            - inventory_hostname not in ethernet_switches.stdout
+            - dellswitch_info.stdout | regex_search(ethernet_search_key)
+      rescue:
+        - name: Failed while adding device to ethernet_inventory
+          debug:
+            msg: "{{ ethernet_inventory_fail_msg }}"
+      when:
+        - not idrac_inventory_status
+        - not ethernet_inventory_status
+        - inventory_hostname not in idrac_hosts.stdout
+
+    - name: Add the host to awx ethernet inventory
+      block:
+        - name: Add the host to awx ethernet inventory if not present
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts create --name {{ inventory_hostname }} --inventory ethernet_inventory
+          changed_when: true
+          no_log: true
+      rescue:
+        - name: Failed while adding device to ethernet_inventory
+          debug:
+            msg: "{{ ethernet_inventory_fail_msg }}"
+      when: ethernet_inventory_status
+
+    - name: ib_inventory validation tasks
+      block:
+        - name: Fetch the hosts in infiniband inventory
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts list --inventory infiniband_inventory -f human --filter "name"
+          changed_when: false
+          no_log: true
+          run_once: true
+          register: infiniband_switches
+
+        - name: Authenticate infiniband Switch
+          uri:
+            url: http://{{ inventory_hostname }}/admin/launch?script=rh&template=login&action=login
+            method: POST
+            body_format: form-urlencoded
+            body:
+              f_user_id: "{{ ib_username }}"
+              f_password: "{{ ib_password }}"
+              enter: Sign in
+            status_code: "{{ infiniband_status_code }}"
+          no_log: true
+          register: login
+          when: inventory_hostname not in infiniband_switches.stdout
+
+        - name: Assert infiniband switch
+          uri:
+            url: http://{{ inventory_hostname }}/admin/launch?script=json
+            method: POST
+            body_format: json
+            headers:
+              Cookie: "{{ login.set_cookie.split(';')[0] }}"
+            body:
+              {
+              "commands":
+              [
+                "show version"
+              ]
+              }
+          register: infinibandswitch_info
+          when:
+            - inventory_hostname not in infiniband_switches.stdout
+            - not login.failed
+
+        - name: Set ib_inventory_status
+          set_fact:
+            ib_inventory_status: true
+          when:
+            - inventory_hostname not in infiniband_switches.stdout
+            - not login.failed
+            - infinibandswitch_info.json.data['Product name'] == infiniband_search_key
+      rescue:
+        - name: Failed while adding device to ib_inventory
+          debug:
+            msg: "{{ ib_inventory_fail_msg }}"
+      when:
+        - not idrac_inventory_status
+        - not ethernet_inventory_status
+        - not ib_inventory_status
+        - inventory_hostname not in idrac_hosts.stdout
+
+    - name: Add the host to awx infiniband_inventory
+      block:
+        - name: Add the host to awx infiniband_inventory if not present
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts create --name {{ inventory_hostname }} --inventory infiniband_inventory
+          changed_when: true
+          no_log: true
+      rescue:
+        - name: Failed while adding device to ib_inventory
+          debug:
+            msg: "{{ ib_inventory_fail_msg }}"
+      when: ib_inventory_status
+
+    - name: powervault_me4_inventory validation tasks
+      block:
+        - name: Fetch the hosts in powervault me4 inventory
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts list --inventory powervault_me4_inventory -f human --filter "name"
+          changed_when: false
+          no_log: true
+          run_once: true
+          register: me4_storage
+
+        - name: Get auth string for powervault
+          shell: echo -n {{ powervault_me4_username }}_{{ powervault_me4_password }} | sha256sum
+          changed_when: false
+          register: auth_string
+          no_log: true
+          when: inventory_hostname not in me4_storage.stdout
+
+        - name: Get session key for powervault
+          uri:
+            url: https://{{ inventory_hostname }}/api/login/{{ auth_string.stdout | replace(" -", "") }}
+            method: GET
+            headers:
+              {'datatype': 'json'}
+            validate_certs: no
+          register: session_key
+          when: inventory_hostname not in me4_storage.stdout
+
+        - name: Assert me4_powervault
+          uri:
+            url: https://{{ inventory_hostname }}/api/show/system
+            method: GET
+            body_format: json
+            validate_certs: no
+            use_proxy: no
+            headers:
+              {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
+          register: system_info
+          when: inventory_hostname not in me4_storage.stdout
+
+        - name: Set powervault_me4_status
+          set_fact:
+            powervault_me4_status: true
+          when:
+            - inventory_hostname not in me4_storage.stdout
+            - me4_powervault_search_key in system_info.json.system[0]['scsi-product-id']
+      rescue:
+        - name: Failed while adding device to powervault_me4_inventory
+          debug:
+            msg: "{{ powervault_me4_fail_msg }}"
+      when:
+        - not idrac_inventory_status
+        - not ethernet_inventory_status
+        - not ib_inventory_status
+        - not powervault_me4_status
+        - inventory_hostname not in idrac_hosts.stdout
+
+    - name: Add the host to awx powervault_me4_inventory
+      block:
+        - name: Add the host to awx powervault_me4_inventory if not present
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts create --name {{ inventory_hostname }} --inventory powervault_me4_inventory
+          changed_when: true
+          no_log: true
+      rescue:
+        - name: Failed while adding device to powervault_me4_inventory
+          debug:
+            msg: "{{ powervault_me4_fail_msg }}"
+      when: powervault_me4_status

+ 30 - 13
control_plane/roles/collect_device_info/tasks/main.yml

@@ -1,19 +1,36 @@
 # Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-#      http://www.apache.org/licenses/LICENSE-2.0
+#     http://www.apache.org/licenses/LICENSE-2.0
 #
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This role will not group the devices if user provides invalid credentials
 ---
 
-# Will be updated later in each PR
-- name: Pass
-  debug:
-    msg: "Pass"
+- name: Check if provisioned host file exists
+  stat:
+    path: "{{ mgmt_provisioned_hosts_file }}"
+  register: provisioned_file
+
+- name: Check the mgmt_provisioned_hosts_file output
+  command: cat {{ mgmt_provisioned_hosts_file }}
+  changed_when: false
+  register: mgmt_hosts
+  when: provisioned_file.stat.exists
+
+- name: Create device_inventory
+  add_host:
+    name: "{{ item }}"
+    groups: "device_inventory"
+  with_items: "{{ mgmt_hosts.stdout_lines }}"
+  when: 
+    - provisioned_file.stat.exists
+    - item | trim | length > 1

+ 36 - 0
control_plane/roles/collect_device_info/vars/main.yml

@@ -0,0 +1,36 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+
+# vars file for collect_device_info role
+# This role will not group the devices if user provides invalid credentials
+
+# Usage main.yml
+mgmt_provisioned_hosts_file: "{{ role_path }}/files/mgmt_provisioned_hosts.yml"
+
+# Usage create_inventory.yml
+infiniband_status_code: 302
+idrac_search_key: "Integrated Dell Remote Access Controller"
+ethernet_search_key: "OS10"
+infiniband_search_key: "MLNX-OS"
+me4_powervault_search_key: "ME4"
+idrac_inventory_fail_msg: "Failed. Unable to add {{ inventory_hostname }} to idrac_inventory"
+ethernet_inventory_fail_msg:  "Failed. Unable to add {{ inventory_hostname }} to ethernet_inventory"
+ib_inventory_fail_msg: "Failed. Unable to add {{ inventory_hostname }} to ib_inventory"
+powervault_me4_fail_msg: "Failed. Unable to add {{ inventory_hostname }} to powervault_me4_inventory"
+base_vars_file: "{{ playbook_dir }}/../../../input_params/base_vars.yml"
+login_vars_file: "{{ playbook_dir }}/../../../input_params/login_vars.yml"
+login_vault_file: "{{ playbook_dir }}/../../../input_params/.login_vault_key"
+file_perm: '0644'

+ 21 - 17
control_plane/roles/collect_node_info/files/add_host.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,36 +12,40 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-
-- name: Check if host already exists
-  command: awk "{{ '/'+ item + '/' }}" /root/inventory
-  register: check_host
-  changed_when: no
-
 - name: Initialise host description
   set_fact:
     host_description: "Description Unavailable"
-
+    
 - name: Fetch description
   set_fact:
     host_description: "CPU:{{ hostvars[item]['ansible_processor_count'] }}
     Cores:{{ hostvars[item]['ansible_processor_cores'] }}
     Memory:{{ hostvars[item]['ansible_memtotal_mb'] }}MB
     BIOS:{{ hostvars[item]['ansible_bios_version'] }}"
-  when: not check_host.stdout | regex_search(item)
   ignore_errors: yes
 
-- name: Add host
-  lineinfile:
-    path:  "/root/inventory"
-    line: "    {{ item }}:\n      _awx_description: {{ host_description }}"
+- name: Fetch the hosts in awx node inventory
+  command: >-
+    awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+    --conf.insecure hosts list --inventory node_inventory
+  changed_when: false
+  no_log: true
   when:
-    - not check_host.stdout | regex_search(item)
-    - host_description != "Description Unavailable"
+     - host_description != "Description Unavailable"
+  register: hosts
+  ignore_errors: yes
+  
+- name: Add the host to awx node inventory if not present
+  command: >-
+    awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+    --conf.insecure hosts create --name {{ item }} --inventory node_inventory
+  changed_when: true
+  when: item not in hosts.stdout
+  no_log: true
+  ignore_errors: yes
 
 - name: Host added msg
   debug:
     msg: "{{ host_added_msg + item }}"
   when:
-    - not check_host.stdout | regex_search(item)
-    - host_description != "Description Unavailable"
+    - host_description != "Description Unavailable"

+ 99 - 22
control_plane/roles/collect_node_info/files/create_inventory.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-
 - name: Find reachable hosts
   hosts: all
   gather_facts: false
@@ -44,14 +43,29 @@
     - name: Include vars file of inventory role
       include_vars: ../vars/main.yml
 
-- name: Set hostname on reachable nodes and gather facts
+    - name: Check if omnia config file is encrypted
+      command: "cat {{ omnia_config_file }}"
+      changed_when: false
+      register: config_content
+      no_log: true
+
+    - name: Decrypt omnia_config.yml
+      command: >-
+        ansible-vault decrypt "{{ omnia_config_file }}"
+        --vault-password-file "{{ omnia_config_vault_file }}"
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+
+    - name: Include vars file of inventory role
+      include_vars: "{{ omnia_config_file }}"
+
+- name: Set hostname for reachable nodes and gather facts
   hosts: reachable
   gather_facts: False
   ignore_unreachable: true
-  remote_user: "{{ cobbler_username }}"
+  remote_user: "{{ host_username }}"
   vars:
-    ansible_password: "{{ cobbler_password }}"
-    ansible_become_pass: "{{ cobbler_password }}"
+    ansible_password: "{{ host_password }}"
+    ansible_become_pass: "{{ host_password }}"
     ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
     mapping_file_present: ""
   tasks:
@@ -65,21 +79,21 @@
       changed_when: false
       ignore_errors: true
 
-    - name: Check if IP present in mapping file
-      command: grep "{{ inventory_hostname }}" ../../provision/files/new_mapping_file.csv
+    - name: Check if IP is present in mapping file
+      command: grep "{{ inventory_hostname }}" ../../provision_cobbler/files/new_host_mapping_file.csv
       delegate_to: localhost
       register: file_present
       when: mapping_file | bool == true
       ignore_errors: true
 
-    - name: Set fact if mapping file present
+    - name: Set fact if mapping file is present
       set_fact:
         mapping_file_present: "{{ file_present.stdout }}"
       when: mapping_file | bool == true
       ignore_errors: true
 
     - name: Get the static hostname from mapping file
-      shell: awk -F',' '$3 == "{{ inventory_hostname }}" { print $2 }' ../../provision/files/new_mapping_file.csv
+      shell: awk -F',' '$3 == "{{ inventory_hostname }}" { print $2 }' ../../provision_cobbler/files/new_host_mapping_file.csv
       delegate_to: localhost
       when: ('localhost' in hostname_check.stdout) and (mapping_file_present != "" ) and ( mapping_file | bool == true )
       register: host_name
@@ -87,36 +101,34 @@
 
     - name: Set the hostname from mapping file
       hostname:
-        name: "{{ host_name.stdout }}"
+        name: "{{ host_name.stdout + '.' + hostvars['localhost']['domain_name'] }}"
       when: ('localhost' in hostname_check.stdout) and (mapping_file_present != "" ) and  (mapping_file | bool == true )
       ignore_errors: true
-    
+
     - name: Set the hostname if hostname not present mapping file
       hostname:
-        name: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+        name: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] + '.' + hostvars['localhost']['domain_name'] }}"
       when: ('localhost' in hostname_check.stdout) and (file_present.rc != 0) and (mapping_file | bool == true )
       ignore_errors: true
 
     - name: Set the system hostname
       hostname:
-        name: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+        name: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1]+'.'+ hostvars['localhost']['domain_name'] }}"
       when: ('localhost' in hostname_check.stdout) and (mapping_file | bool == false)
       ignore_errors: true
 
     - name: Add new hostname to /etc/hosts from mapping file
       lineinfile:
         dest: /etc/hosts
-        regexp: '^127\.0\.0\.1[ \t]+localhost'
-        line: "127.0.0.1 localhost {{ host_name.stdout }}"
+        line: "{{ inventory_hostname }} {{ host_name.stdout + '.' + hostvars['localhost']['domain_name'] }}"
         state: present
       when: ('localhost' in hostname_check.stdout) and ( mapping_file_present != "" ) and ( mapping_file | bool == true )
       ignore_errors: true
 
-    - name: Add new hostname to /etc/hosts if hostname not present mapping fil
+    - name: Add new hostname to /etc/hosts if hostname not present mapping file
       lineinfile:
         dest: /etc/hosts
-        regexp: '^127\.0\.0\.1[ \t]+localhost'
-        line: "127.0.0.1 localhost compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+        line: "{{ inventory_hostname }} compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1]+'.'+ hostvars['localhost']['domain_name'] }}"
         state: present
       when: ('localhost' in hostname_check.stdout) and ( file_present.rc != 0 ) and ( mapping_file | bool == true )
       ignore_errors: true
@@ -124,8 +136,7 @@
     - name: Add new hostname to /etc/hosts
       lineinfile:
         dest: /etc/hosts
-        regexp: '^127\.0\.0\.1[ \t]+localhost'
-        line: "127.0.0.1 localhost compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+        line: "{{ inventory_hostname }} compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] +'.'+ hostvars['localhost']['domain_name'] }}"
         state: present
       when: ('localhost' in hostname_check.stdout) and (mapping_file | bool == false )
       ignore_errors: true
@@ -135,6 +146,72 @@
   connection: local
   gather_facts: false
   tasks:
+    - name: Encrypt omnia_config.yml file
+      command: >-
+        ansible-vault encrypt "{{ omnia_config_file }}"
+        --vault-password-file "{{ omnia_config_vault_file }}"
+      changed_when: false
+
+    - name: Update omnia_config.yml permissions
+      file:
+        path: "{{ omnia_config_file }}"
+        mode: "{{ file_perm }}"
+
+    - name: Check if tower_config_file file is encrypted
+      command: cat "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      changed_when: false
+      no_log: true
+      register: tower_config_content
+      run_once: true
+
+    - name: Decrypt tower_config_file
+      command: >-
+        ansible-vault decrypt "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+        --vault-password-file "{{ playbook_dir }}/../../webui_awx/files/.tower_vault_key"
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in tower_config_content.stdout"
+      run_once: true
+
+    - name: Change file permissions
+      file:
+        path: "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+        mode: "{{ file_perm }}"
+
+    - name: Fetch awx host
+      command: grep "host:" "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      register: fetch_awx_host
+      changed_when: false
+      run_once: true
+
+    - name: Fetch awx username
+      command: grep "username:" "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      register: fetch_awx_username
+      changed_when: false
+      run_once: true
+      no_log: true
+
+    - name: Fetch awx password
+      command: grep "password:" "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      register: fetch_awx_password
+      changed_when: false
+      run_once: true
+      no_log: true
+
+    - name: Set awx variables
+      set_fact:
+        awx_host: "{{ fetch_awx_host.stdout | regex_replace('host: ','') }}"
+        awx_username: "{{ fetch_awx_username.stdout | regex_replace('username: ','') }}"
+        awx_password: "{{ fetch_awx_password.stdout | regex_replace('password: ','') }}"
+      no_log: true
+
+    - name: Encrypt tower_config_file
+      command: >-
+        ansible-vault encrypt "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+        --vault-password-file "{{ playbook_dir }}/../../webui_awx/files/.tower_vault_key"
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in tower_config_content.stdout"
+      run_once: true
+
     - name: Update inventory file
       block:
         - name: Fetch facts and add new hosts
@@ -145,4 +222,4 @@
     - name: Show unreachable hosts
       debug:
         msg: "{{ host_unreachable_msg }} + {{ groups['ungrouped'] }}"
-      when: "'ungrouped' in groups"
+      when: "'ungrouped' in groups"

+ 42 - 52
control_plane/roles/collect_node_info/tasks/main.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,85 +16,75 @@
   set_fact:
     ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
 
-- name: Check if provisioned host file exists
-  stat:
-    path: "{{ role_path }}/files/provisioned_hosts.yml"
-  register: provisioned_file_result
+- name: Fetch the execution environment
+  command: hostname
+  register: host_name
+  changed_when: false
 
-- name: Include vars file of control_plane_common role
-  include_vars: "{{ role_path }}/../control_plane_common/vars/main.yml"
-  no_log: True
+- name: Install sshpass
+  package:
+    name: sshpass
+    state: present
+  when: awx_search_key not in host_name.stdout
 
-- name: Include vars file of webui_awx role
-  include_vars: "{{ role_path }}/../webui_awx/vars/main.yml"
-  no_log: True
+- name: Check if provisioned host file exists
+  stat:
+    path: "{{ provisioned_hosts_file }}"
+  register: provisioned_file
 
-- name: Update inventory file
+- name: Include variable file base_vars.yml
+  include_vars: "{{ base_vars_file }}"
+  
+- name: Update inventory
   block:
-    - name: Check if input config file is encrypted
-      command: cat {{ input_config_filename }}
+    - name: Check if {{ login_vars_file }} file is encrypted
+      command: cat {{ login_vars_file }}
       changed_when: false
+      no_log: true
       register: config_content
+      run_once: true
 
-    - name: Decrpyt appliance_config.yml
+    - name: Decrypt {{ login_vars_file }}
       command: >-
-        ansible-vault decrypt {{ input_config_filename }}
-        --vault-password-file {{ vault_filename }}
+        ansible-vault decrypt {{ login_vars_file }}
+        --vault-password-file {{ login_vault_file }}
       when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      changed_when: false
+      run_once: true
 
-    - name: Include variable file appliance_config.yml
-      include_vars: "{{ input_config_filename }}"
-      no_log: True
-
+    - name: Include variable file {{ login_vars_file }}
+      include_vars: "{{ login_vars_file }}"
+      no_log: true
+      run_once: true
+
     - name: Save input variables from file
       set_fact:
-        cobbler_password: "{{ provision_password }}"
         mapping_file: false
-        path_mapping_file: "{{ mapping_file_path }}"
-      no_log: True
 
     - name: Check the status for mapping file
       set_fact:
         mapping_file: true
-      when: path_mapping_file != ""
+      when: host_mapping_file_path
 
-    - name: Encrypt input config file
+    - name: Encrypt {{ login_vars_file }}
       command: >-
-        ansible-vault encrypt {{ input_config_filename }}
-        --vault-password-file {{ vault_filename }}
+        ansible-vault encrypt {{ login_vars_file }}
+        --vault-password-file {{ login_vault_file }}
       changed_when: false
-
-    - name: Check if inventory file already exists
-      file:
-        path: "/root/inventory"
-        state: absent
-
-    - name: Create empty inventory file
-      copy:
-        dest:  "/root/inventory"
-        content: |
-          ---
-          all:
-            hosts:
-        owner: root
-        mode: 0775
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      run_once: true
 
     - name: Add inventory playbook
       block:
         - name: add hosts with description to inventory file
           command: >-
-            ansible-playbook -i {{ role_path }}/files/provisioned_hosts.yml
+            ansible-playbook -i {{ provisioned_hosts_file }}
             {{ role_path }}/files/create_inventory.yml
-            --extra-vars "cobbler_username={{ cobbler_username }} cobbler_password={{ cobbler_password }} mapping_file={{ mapping_file | bool }}"
+            --extra-vars "host_username={{ host_username }} host_password={{ provision_password }} mapping_file={{ mapping_file | bool }}"
           no_log: True
           register: register_error
       rescue:
         - name: Fail if host addition was not successful
           fail:
-            msg: "{{ register_error.stderr + register_error.stdout | regex_replace(cobbler_username) | regex_replace(cobbler_password) }}"
-
-  when: provisioned_file_result.stat.exists
-
-- name: push inventory to AWX
-  command: awx-manage inventory_import --inventory-name {{ omnia_inventory_name }} --source /root/inventory
-  when: provisioned_file_result.stat.exists
+            msg: "{{ register_error.stderr + register_error.stdout | regex_replace(host_username) | regex_replace(provision_password) }}"
+  when: provisioned_file.stat.exists

+ 12 - 3
control_plane/roles/collect_node_info/vars/main.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,5 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-host_added_msg: "Added host to inventory: "
-host_unreachable_msg: "Following hosts are unreachable: "
+provisioned_hosts_file: "{{ role_path }}/files/provisioned_hosts.yml"
+base_vars_file: "{{ role_path }}/../../input_params/base_vars.yml"
+login_vars_file: "{{ role_path }}/../../input_params/login_vars.yml"
+login_vault_file: "{{ role_path }}/../../input_params/.login_vault_key"
+omnia_config_file: "../../../../omnia_config.yml"
+omnia_config_vault_file: "../../../../.omnia_vault_key"
+file_perm: '0644'
+host_username: root
+host_added_msg: "Added host to awx inventory: "
+host_unreachable_msg: "Following hosts are unreachable: "
+awx_search_key: awx

+ 8 - 34
control_plane/roles/control_plane_common/tasks/fetch_base_inputs.yml

@@ -53,7 +53,6 @@
     snmp_enabled: false   
     mngmnt_mapping_file: false
     host_mapping_file: false
-    ib_mapping_file: false
 
 - name: Verify snmp_trap_destination IP address
   set_fact:
@@ -165,7 +164,7 @@
       - provision_method == "pxe" or provision_method == "idrac"
     success_msg: "{{ success_provision_method }}"
     fail_msg: "{{ fail_provision_method }}"
-    
+
 - name: Check timezone file
   command: grep -Fx "{{ timezone }}" {{ role_path }}/files/timezone.txt
   ignore_errors: yes
@@ -199,7 +198,7 @@
     msg: "{{ invalid_iso_file_path }}"
   when: ( result_path_iso_file.stat.exists ) and ( ".iso" not in iso_file_path )
 
-####management_net_dhcp_start_end_range
+#### management_net_dhcp_start_end_range
 - name: Assert management network nic
   assert:
     that:
@@ -244,6 +243,7 @@
     that:
       - mngmnt_network_dhcp_start_range
       - mngmnt_network_dhcp_start_range | ipv4
+      - mngmnt_network_dhcp_start_range != mngmnt_network_ip
       - mngmnt_network_dhcp_start_range != mngmnt_network_dhcp_end_range
       - dhcp_start_mgmnt == mngmnt_network_subnet
       - dhcp_start_mgmnt == dhcp_end_mgmnt
@@ -255,6 +255,7 @@
     that:
       - mngmnt_network_dhcp_end_range
       - mngmnt_network_dhcp_end_range | ipv4
+      - mngmnt_network_dhcp_end_range != mngmnt_network_ip
       - mngmnt_network_dhcp_start_range != mngmnt_network_dhcp_end_range
       - dhcp_end_mgmnt == mngmnt_network_subnet
       - dhcp_start_mgmnt == dhcp_end_mgmnt
@@ -279,13 +280,6 @@
 #########
 
 ###Host network####
-- name: Assert host network nic
-  assert:
-    that:
-      - host_network_nic in nic_addr_up.stdout
-    success_msg: "{{ success_msg_host_network_nic }}"
-    fail_msg: "{{ fail_msg_host_network_nic }}"
-
 - name: Fetch the host network ip, netmask and subnet
   set_fact:
     hpc_ip: "{{ lookup('vars','ansible_'+host_network_nic).ipv4.address }}"
@@ -323,6 +317,7 @@
     that:
       - host_network_dhcp_start_range
       - host_network_dhcp_start_range | ipv4
+      - host_network_dhcp_start_range != hpc_ip
       - host_network_dhcp_start_range != host_network_dhcp_end_range
       - dhcp_start_host == subnet
       - dhcp_start_host == dhcp_end_host
@@ -334,6 +329,7 @@
     that:
       - host_network_dhcp_end_range
       - host_network_dhcp_end_range | ipv4
+      - host_network_dhcp_end_range != hpc_ip
       - host_network_dhcp_start_range != host_network_dhcp_end_range
       - dhcp_end_host == subnet
       - dhcp_start_host == dhcp_end_host
@@ -366,14 +362,6 @@
     fail_msg: "{{ fail_msg_different_nics }}"
 
 ########
-- name: Assert infiniband network nic
-  assert:
-    that:
-      - ib_network_nic in nic_addr_up.stdout
-    success_msg: "{{ success_msg_ib_network_nic }}"
-    fail_msg: "{{ fail_msg_ib_network_nic }}"
-  when: ib_switch_support
-
 - name: Fetch the infiniband network ip, netmask and subnet
   set_fact:
     ib_ip: "{{ lookup('vars','ansible_'+ib_network_nic).ipv4.address }}"
@@ -416,6 +404,7 @@
     that:
       - ib_network_dhcp_start_range
       - ib_network_dhcp_start_range | ipv4
+      - ib_network_dhcp_start_range != ib_ip
       - ib_network_dhcp_start_range != ib_network_dhcp_end_range
       - dhcp_start_ib == ib_subnet
       - dhcp_start_ib == dhcp_end_ib
@@ -428,6 +417,7 @@
     that:
       - ib_network_dhcp_end_range
       - ib_network_dhcp_end_range | ipv4
+      - ib_network_dhcp_end_range != ib_ip
       - ib_network_dhcp_start_range != ib_network_dhcp_end_range
       - dhcp_end_ib == ib_subnet
       - dhcp_start_ib == dhcp_end_ib
@@ -435,22 +425,6 @@
     fail_msg: "{{ fail_dhcp_range }} for infiniband network"
   when: ib_switch_support
 
-- name: Set the mapping file value for infiniband
-  set_fact:
-    ib_mapping_file: true
-  when: (ib_switch_support) and (ib_mapping_file_path | length > 0)
-
-- name: Assert valid infiniband_mapping_file_path
-  stat:
-    path: "{{ ib_mapping_file_path }}"
-  when: ib_switch_support and ib_mapping_file
-  register: result_ib_mapping_file
-
-- name : Valid infiniband_mapping_file_path
-  fail:
-    msg: "{{ invalid_mapping_file_path }} for infiniBand network configuration"
-  when: ib_mapping_file and (not result_ib_mapping_file.stat.exists)
-
 - name: Verify different nics with infiniband nic
   assert:
     that:

+ 1 - 1
control_plane/roles/control_plane_common/tasks/password_config.yml

@@ -200,4 +200,4 @@
 - name: Update login_vars.yml permission
   file:
     path: "{{ login_vars_filename }}"
-    mode: "{{ vault_file_perm }}"
+    mode: "{{ file_perm }}"

+ 65 - 3
control_plane/roles/control_plane_common/tasks/verify_omnia_params.yml

@@ -56,7 +56,19 @@
   when:
     - mariadb_password | length < 1 or
       k8s_version | length < 1 or
-      k8s_cni | length < 1
+      k8s_cni | length < 1 or
+      domain_name | length < 1
+
+- name: Validate login node parameters when login_node_reqd is set to true
+  fail:
+    msg: "{{ input_config_failure_msg }} for login_node"
+  when:
+    - ( domain_name | length < 1 or
+      realm_name | length < 1 or
+      directory_manager_password | length < 1 or
+      ipa_admin_password | length < 1 ) and
+      ( login_node_required and
+      host_mapping_file )
 
 - name: Assert mariadb_password
   assert:
@@ -91,13 +103,63 @@
     docker_password: "{{ docker_password }}"
   no_log: True
 
+- name: Verify the value of login_node_required
+  assert:
+    that:
+      - login_node_required == true or login_node_required == false
+    success_msg: "{{ login_node_required_success_msg }}"
+    fail_msg: "{{ login_node_required_fail_msg }}"
+
 - name: Validate the domain name
   assert:
     that:
       - domain_name is regex("^(?!-)[A-Za-z0-9-]+([\\-\\.]{1}[a-z0-9]+)*\\.[A-Za-z]{2,6}$")
     success_msg: "{{ domain_name_success_msg }}"
     fail_msg: "{{ domain_name_fail_msg }}"
-  when: domain_name | length > 0
+  when:
+    - host_mapping_file
+    - login_node_required
+
+- name: Validate the realm name
+  assert:
+    that:
+      - realm_name is regex("^(?!-)[A-Z0-9-]+([\\-\\.]{1}[a-z0-9]+)*\\.[A-Z]{2,6}$")
+      - '"." in realm_name'
+    success_msg: "{{ realm_name_success_msg }}"
+    fail_msg: "{{ realm_name_fail_msg }}"
+  when:
+    - host_mapping_file
+    - login_node_required
+
+- name: Assert directory_manager_password
+  assert:
+    that:
+      - directory_manager_password | length > min_length | int - 1
+      - directory_manager_password | length < max_length | int + 1
+      - '"-" not in directory_manager_password '
+      - '"\\" not in directory_manager_password '
+      - '"\"" not in directory_manager_password '
+      - " \"'\" not in directory_manager_password "
+    success_msg: "{{ success_msg_directory_manager_password }}"
+    fail_msg: "{{ fail_msg_directory_manager_password }}"
+  when:
+    - host_mapping_file
+    - login_node_required
+
+- name: Assert ipa_admin_password
+  assert:
+    that:
+      - ipa_admin_password | length > min_length | int - 1
+      - ipa_admin_password | length < max_length | int + 1
+      - '"-" not in ipa_admin_password '
+      - '"\\" not in ipa_admin_password '
+      - '"\"" not in ipa_admin_password '
+      - " \"'\" not in ipa_admin_password "
+    success_msg: "{{ success_msg_ipa_admin_password }}"
+    fail_msg: "{{ fail_msg_ipa_admin_password }}"
+  when:
+    - host_mapping_file
+    - login_node_required
 
 - name: Encrypt input config file
   command: >-
@@ -108,4 +170,4 @@
 - name: Update omnia_config.yml permission
   file:
     path: "{{ role_path }}/../../../{{ config_filename }}"
-    mode: "{{ vault_file_perm }}"
+    mode: "{{ vault_file_perm }}"

+ 12 - 1
control_plane/roles/control_plane_common/vars/main.yml

@@ -75,6 +75,17 @@ success_msg_k8s_version: "Kubernetes Version Validated"
 fail_msg_k8s_version: "Failed. Kubernetes Version is unsupported or incorrect in omnia_config.yml"
 success_msg_k8s_cni: "Kubernetes CNI Validated"
 fail_msg_k8s_cni: "Failed. Kubernetes CNI is incorrect in omnia_config.yml"
+domain_name_success_msg: "domain name successfully validated"
+domain_name_fail_msg: "Failed. Incorrect format provided for domain name in omnia_config.yml"
+realm_name_success_msg: "realm_name successfully validated"
+realm_name_fail_msg: "Failed. Incorrect realm_name format in omnia_config.yml"
+success_msg_directory_manager_password: "directory_manager_password successfully validated"
+fail_msg_directory_manager_password: "Failed. Incorrect format provided for directory_manager_password"
+success_msg_ipa_admin_password: "ipa_admin_password successfully validated"
+fail_msg_ipa_admin_password: "Failed. Incorrect format provided for ipa_admin_password"
+input_config_failure_msg: "Input parameters cannot be empty"
+login_node_required_success_msg: "Login_node_required successfully validated"
+login_node_required_fail_msg: "Failed. login_node_required can be either true or false"
 
 # Usage: fetch_base_inputs.yml
 base_vars_filename: "input_params/base_vars.yml"
@@ -88,9 +99,9 @@ fail_msg_public_nic: "Failed. Incorrect public nic provided in base_vars.yml"
 success_msg_k8s_pod_network_cidr: "Appliance k8s pod network cidr validated"
 fail_msg_k8s_pod_network_cidr: "Failed. Incorrect appliance k8s pod network cidr provided in base_vars.yml"
 success_awx_organization: "awx organization validated"
-fail_awx_organization: "Failed. Incorrect format in awx organization"
 success_provision_method: "Provision method validated"
 fail_provision_method: "Failed. Provision method can either be set to idrac or pxe"
+fail_awx_organization: "Failed. Incorrect format in awx organization"
 success_timezone_msg: "timezone validated"
 fail_timezone_msg: "Failed. Incorrect timezone provided. Please check the file timezone.txt in control_plane/roles/control_plane_common/files/ folder"
 fail_language: "Failed. Only en-US(english) language supported"

+ 6 - 11
control_plane/roles/control_plane_ib/files/Dockerfile

@@ -1,21 +1,17 @@
 # Dockerfile for creating the management network container
 
-FROM centos:7
+FROM centos:8
 
 # RPM REPOs
-RUN yum install -y \
+RUN dnf install -y \
     epel-release \
-    && yum clean all \
-    && rm -rf /var/cache/yum
-
-RUN yum update -y \
-    && yum clean all \
-    && rm -rf /var/cache/yum
+    && dnf clean all \
+    && rm -rf /var/cache/dnf
 
+RUN dnf install dhcp-server -y
 RUN yum install -y \
   ansible \
   cronie \
-  dhcp \
   net-tools \
   && yum clean all \
   &&  rm -rf /var/cache/yum
@@ -24,8 +20,7 @@ RUN mkdir /root/omnia
 
 #Copy Configuration files
 COPY dhcpd.conf  /etc/dhcp/dhcpd.conf
-#COPY mngmnt_container_configure.yml /root/
 
 RUN systemctl enable dhcpd
 
-CMD ["sbin/init"]
+CMD ["sbin/init"]

+ 0 - 48
control_plane/roles/control_plane_ib/files/dhcpd.conf

@@ -1,48 +0,0 @@
-
-# ******************************************************************
-# Cobbler managed dhcpd.conf file
-#
-# generated from cobbler dhcp.conf template ($date)
-# Do NOT make changes to /etc/dhcpd.conf. Instead, make your changes
-# in /etc/cobbler/dhcp.template, as /etc/dhcpd.conf will be
-# overwritten.
-#
-# ******************************************************************
-
-ddns-update-style interim;
-
-allow booting;
-allow bootp;
-
-ignore client-updates;
-set vendorclass = option vendor-class-identifier;
-
-option pxe-system-type code 93 = unsigned integer 16;
-
-subnet 172.25.0.0 netmask 255.255.0.0 {
-option subnet-mask 255.255.0.0;
-range dynamic-bootp 172.25.0.10 172.25.0.100;
-default-lease-time  21600;
-max-lease-time  43200;
-next-server 172.25.0.1;
-#insert the static DHCP leases for configuration here
-
-
-     class "pxeclients" {
-          match if substring (option vendor-class-identifier, 0, 9) = "PXEClient";
-          if option pxe-system-type = 00:02 {
-                  filename "ia64/elilo.efi";
-          } else if option pxe-system-type = 00:06 {
-                  filename "grub/grub-x86.efi";
-          } else if option pxe-system-type = 00:07 {
-                  filename "grub/grub-x86_64.efi";
-          } else if option pxe-system-type = 00:09 {
-                  filename "grub/grub-x86_64.efi";
-          } else {
-                  filename "pxelinux.0";
-          }
-     }
-
-}
-
-#end for

+ 0 - 43
control_plane/roles/control_plane_ib/files/infiniband_inventory_creation.yml

@@ -1,43 +0,0 @@
-#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
----
-
-- hosts: localhost
-  connection: local
-  gather_facts: false
-  tasks:
-    - name: Read dhcp file
-      set_fact:
-        var: "{{ lookup('file', '/var/lib/dhcpd/dhcpd.leases').split()| unique | select| list }}"
-
-    - name: Filter the ip
-      set_fact:
-        vars_new: "{{ var| ipv4('address')| to_nice_yaml}}"
-
-    - name: Create the static ip
-      shell: awk -F',' 'NR >1{print $3}' omnia/appliance/roles/provision/files/new_mapping_file.csv > static_hosts.yml
-      changed_when: false
-      ignore_errors: true
-
-    - name: Create the dynamic inventory
-      shell: |
-        echo "[all]" >  omnia/appliance/roles/inventory/files/provisioned_hosts.yml
-        echo "{{ vars_new }}" > temp.txt
-        egrep -o '[1-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' temp.txt >>dynamic_hosts.yml
-      changed_when: false
-      ignore_errors: true
-
-    - name: Final inventory
-      shell: cat dynamic_hosts.yml static_hosts.yml| sort -ur  >> omnia/appliance/roles/inventory/files/provisioned_hosts.yml
-      changed_when: false

+ 2 - 2
control_plane/roles/control_plane_ib/files/k8s_infiniband.yml

@@ -21,11 +21,11 @@ spec:
       volumes:
         - name: omnia-storage
           hostPath:
-            path: /home/omnia/
+            path: /root/omnia
             type: Directory
       containers:
         - name: infiniband-container
-          image: 'localhost/infiniband_container:latest'
+          image: 'localhost/infiniband-container:latest'
           imagePullPolicy: Never
           command:
             - /sbin/init

+ 2 - 2
control_plane/roles/control_plane_ib/files/temp_dhcp.template

@@ -22,8 +22,8 @@ option pxe-system-type code 93 = unsigned integer 16;
 subnet subnet_mask netmask net_mask {
 option subnet-mask net_mask;
 range dynamic-bootp start end;
-default-lease-time  21600;
-max-lease-time  43200;
+default-lease-time 26100;
+max-lease-time 43200;
 next-server next_server;
 #insert the static DHCP leases for configuration here
 

+ 4 - 10
control_plane/roles/control_plane_ib/tasks/check_prerequisites.yml

@@ -32,7 +32,7 @@
   - name: Set status for backup file
     set_fact:
       infiniband_backup_map_status: true
-    when: infiniband_backup_map.stat.exists == true  
+    when: infiniband_backup_map.stat.exists 
   rescue:
   - name: Message
     debug:
@@ -42,14 +42,14 @@
 - name: Inspect the infiniband_container image
   command: "buildah images {{ infiniband_image_name }}"
   register: infiniband_container_image_result
-  ignore_errors: true
+  failed_when: false
   changed_when: false
   tags: install
 
 - name: Check infiniband_container status on the machine
   command: kubectl get pods -n network-config
   register: infiniband_container_result
-  ignore_errors: true
+  failed_when: false
   changed_when: false
   tags: install
 
@@ -63,10 +63,4 @@
   set_fact:
     infiniband_container_status: true
   when: "'infiniband-container' in infiniband_container_result.stdout"
-  tags: install
-
-- name: Update infiniband_container  status
-  set_fact:
-    infiniband_container_config_status: true
-  when:
-    - infiniband_container_status == true
+  tags: install

+ 2 - 2
control_plane/roles/control_plane_ib/tasks/configure_infiniband_container.yml

@@ -17,13 +17,13 @@
   command: kubectl get pods -n network-config
   changed_when: false
   register: infiniband_pod_status
-  ignore_errors: true
+  failed_when: false
 
 - name: Deploy infiniband pod
   command: "kubectl apply -f {{ role_path }}/files/k8s_infiniband.yml"
   changed_when: true
   tags: install
-  when: infiniband_container_status == true and  infiniband_container_config_status == false
+  when: infiniband_container_status and  (not infiniband_container_config_status)
 
 - name: Wait for infiniband pod to come to ready state
   command: kubectl wait --for=condition=ready -n network-config pod -l app=infiniband

+ 1 - 1
control_plane/roles/control_plane_ib/tasks/dhcp_configure.yml

@@ -43,4 +43,4 @@
   replace:
     path: "{{ role_path }}/files/dhcpd.conf"
     regexp: '^next-server next_server;'
-    replace: 'next-server {{ ib_ip }};'
+    replace: 'next-server {{ ib_ip }};'

+ 3 - 8
control_plane/roles/control_plane_ib/tasks/main.yml

@@ -14,7 +14,7 @@
 ---
 
 # Tasks file for infiniband
-
+
 - name: Check if IB switch is supported
   block:
     - name: Check infiniband_container status on machine
@@ -29,15 +29,11 @@
       when: not infiniband_container_status
 
     - name: Include variable file base_vars.yml
-      include_vars: "{{ base_file }}"
+      include_vars: ../../../input_params/base_vars.yml
 
     - name: Dhcp Configuration
       import_tasks: dhcp_configure.yml
-      when: (not infiniband_container_image_status) or ( infiniband_backup_map_status == true)
-
-    #- name: Mapping file validation
-    #  import_tasks: mapping_file.yml
-    #  when: (not infiniband_container_image_status) and (mapping_file == true) or ( backup_map_status == true)
+      when: (not infiniband_container_image_status)
 
     - name: infiniband_container image creation
       import_tasks: infiniband_container_image.yml
@@ -57,5 +53,4 @@
             verbosity: 2
           when: not infiniband_container_status
       tags: install
-
   when: ib_switch_support

+ 0 - 1
control_plane/roles/control_plane_ib/vars/main.yml

@@ -21,4 +21,3 @@ infiniband_image_tag: latest
 mount_path: /root/omnia
 infiniband_message_skipped: "The container is already present"
 infiniband_message_installed: "The container is installed"
-ib_base_file: "{{ role_path }}/../../input_params/base_vars.yml" 

+ 2 - 2
control_plane/roles/powervault_me4/tasks/volume.yml

@@ -67,7 +67,7 @@
   fail:
     msg: "{{ pv_disk2.json.status[0].response }}"
   when:
-    - powervault_me4_pool_typ|lower e== "linear"
+    - powervault_me4_pool_type|lower == "linear"
     - pv_disk2.json.status[0] ['response-type'] == "Error"
 
 - name: Create volume1
@@ -148,4 +148,4 @@
     msg: "{{ pv_vol4.json.status[0].response }}"
   when:
     - powervault_me4_pool_type|lower == "linear"
-    - pv_vol4.json.status[0]['response-type'] == "Error"
+    - pv_vol4.json.status[0]['response-type'] == "Error"

+ 2 - 0
control_plane/test/test_ib_inventory

@@ -0,0 +1,2 @@
+[infiniband]
+1.2.3.4

+ 101 - 0
control_plane/test/test_ib_mtu.yml

@@ -0,0 +1,101 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+---
+- name: Get MTU of mgmt5
+  hosts: infiniband
+  gather_facts: no
+  connection: local
+  vars_files:
+    - test_vars/test_infiniband_vars.yml
+  tasks:
+    - name: Set credentials and variables
+      set_fact:
+        username: "{{ username }}"
+        password: "{{ password }}"
+        filtered_dict: {}
+      no_log: true
+      tags: reboot,mtu
+
+    - name: Authenticate
+      block:
+        - name: Authenticate to switch- "{{ inventory_hostname }}"
+          uri:
+            url: http://{{ inventory_hostname }}/admin/launch?script=rh&template=login&action=login
+            method: POST
+            body_format: form-urlencoded
+            body:
+              f_user_id: "{{ username }}"
+              f_password: "{{ password }}"
+              enter: Sign in
+            status_code: 302
+          register: login
+          no_log: true
+ 
+        - name: Verify authentication status
+          fail:
+            msg: "Authentication failed"
+          when: login.set_cookie is undefined
+      rescue:
+        - name: Filtered response creation
+          set_fact:
+            filtered_dict: "{{filtered_dict |combine({item.key: item.value})}}"
+          when: item.key not in 'invocation'
+          with_dict: "{{ login }}"
+          no_log: true
+
+        - name: Authentication failure response
+          fail: 
+            msg: "{{ filtered_dict }}"
+      tags: reboot,mtu
+
+    - name: get MTU from ib {{ validation_port }}
+      uri:
+        url: http://{{ inventory_hostname }}/admin/launch?script=json
+        method: POST
+        body_format: json
+        headers:
+          Cookie: "{{ login.set_cookie.split(';')[0] }}"
+        body:
+          {
+          "commands": 
+           [
+             "show interfaces ib {{ validation_port }}"
+           ]
+          }
+        return_content: yes
+      register: value
+      tags: mtu
+
+    - name: print registered o/p
+      debug:
+        msg: "{{ value }}"
+      tags: mtu
+
+    - name: reboot IB switch
+      uri:
+        url: http://{{ inventory_hostname }}/admin/launch?script=json
+        method: POST
+        body_format: json
+        headers:
+          Cookie: "{{ login.set_cookie.split(';')[0] }}"
+        body:
+          {
+          "commands":
+           [
+             "reload"
+           ]
+          }
+        return_content: yes
+      tags: reboot

+ 333 - 0
control_plane/test/test_infiniband_config.yml

@@ -0,0 +1,333 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Testcase OMNIA_1.1_IB_TC_007
+# Execute infiniband.yml with both valid Global and valid interface configs in ib_config.yml
+- name: OMNIA_1.1_IB_TC_007
+  hosts: infiniband
+  gather_facts: false
+  tags: VERIFY_OMNIA_01
+  connection: local
+  vars_files:
+    - test_vars/test_infiniband_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+     - name: Executing network_ib role
+       vars:
+        inventory_name: "{{ ib_inventory_name }}"
+        host_name: "{{ ib_host_name }}"
+        template_name: "{{ template_value }}"
+        job_template_name: "{{ job_name }}"
+        playbook_path: "{{ ib_playbook_path }}"
+        delete_status: true
+       include_tasks: "{{ awx_script_path }}"
+     
+     - block:
+        - name: Validate default flow
+          assert:
+            that:
+              - ib_success_msg in job_status.status
+            success_msg: "{{ success_message }}"
+            fail_msg: "{{ fail_case }}"
+          changed_when: false
+   
+     - name: Backup of ib_vars.yml
+       copy:
+         src: "{{ ib_vars_dir }}"
+         dest: "{{ ib_vars_backup_dir }}"
+         mode: "{{ file_perm }}"
+
+     - name: Set MTU of port {{ port_num }}
+       lineinfile:
+        dest: "{{ ib_vars_dir }}"
+        insertbefore: "{{ search_line }}"
+        line: "{{ line_to_add }}"
+
+     - name: Execute network_ib role after setting MTU
+       vars:
+        inventory_name: "{{ ib_inventory_name }}"
+        host_name: "{{ ib_host_name }}"
+        template_name: "{{ template_value }}"
+        job_template_name: "{{ job_name }}"
+        playbook_path: "{{ ib_playbook_path }}"
+        delete_status: true
+       include_tasks: "{{ awx_script_path }}"
+
+     - name: Get MTU of port {{ port_num }}
+       command: ansible-playbook -i "{{ inventory_dir }}" "{{ ib_mtu_path }}" --tags 'mtu'
+       register: mtuvalue
+       changed_when: false
+
+     - block:
+        - name: Validate MTU
+          assert:
+            that:
+              - 'var_check in mtuvalue.stdout'
+            success_msg: "{{ success_message }}"
+            fail_msg: "{{ fail_case }}"
+            
+# Testcase OMNIA_1.1_IB_TC_005
+# set save_changes_to_startup to false
+- name: OMNIA_1.1_IB_TC_005
+  hosts: infiniband
+  gather_facts: false
+  tags: TC_005
+  connection: local
+  vars_files:
+    - test_vars/test_infiniband_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:        
+     - name: Reload IB switch 
+       command: ansible-playbook -i "{{ inventory_dir }}" "{{ ib_mtu_path }}" --tags 'reboot'
+       changed_when: false
+
+     - name: Pausing for IB switch to come up
+       pause:
+         minutes: "{{ time_to_pause }}"
+   
+     - name: Get MTU of port {{ port_num }}
+       command: ansible-playbook -i "{{ inventory_dir }}" "{{ ib_mtu_path }}" --tags 'mtu'
+       register: mtuvalue
+       changed_when: false
+          
+     - block:
+        - name: Validate that MTU is changed
+          assert:
+            that:
+              - 'var_check not in mtuvalue.stdout'
+            success_msg: "{{ success_message }}"
+            fail_msg: "{{ fail_case }}"
+            
+# Testcase OMNIA_1.1_IB_TC_006
+# set save_changes_to_startup to True            
+- name: OMNIA_1.1_IB_TC_006
+  hosts: infiniband
+  gather_facts: false
+  tags: TC_006
+  connection: local
+  vars_files:
+    - test_vars/test_infiniband_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:        
+     - name: Set save_changes_to_startup to True
+       ansible.builtin.replace:
+         dest: "{{ ib_vars_dir }}"
+         regexp: 'save_changes_to_startup: false'
+         replace: 'save_changes_to_startup: True'
+                          
+     - name: Execute network_ib role as port {{ port_num }} has mtu set in ib_Vars
+       vars:
+        inventory_name: "{{ ib_inventory_name }}"
+        host_name: "{{ ib_host_name }}"
+        template_name: "{{ template_value }}"
+        job_template_name: "{{ job_name }}"
+        playbook_path: "{{ ib_playbook_path }}"
+        delete_status: true
+       include_tasks: "{{ awx_script_path }}"
+       
+     - name: Reload IB switch 
+       command: ansible-playbook -i "{{ inventory_dir }}" "{{ ib_mtu_path }}" --tags 'reboot'
+       changed_when: false
+       
+     - name: Pausing for IB switch to come up
+       pause:
+         minutes: "{{ time_to_pause }}"
+  
+     - name: Get MTU of port {{ port_num }}
+       command: ansible-playbook -i "{{ inventory_dir }}" "{{ ib_mtu_path }}" --tags 'mtu'
+       register: mtuvalue
+       changed_when: false
+       
+     - block:
+        - name: Validate that MTU is not changed
+          assert:
+            that:
+              - 'var_check in mtuvalue.stdout'
+            success_msg: "{{ success_message }}"
+            fail_msg: "{{ fail_case }}"
+       
+# Testcase OMNIA_1.1_IB_TC_010
+# Execute infiniband.yml with valid interface and incorrect Global configs in ib_config.yml
+- name: OMNIA_1.1_IB_TC_010
+  hosts: infiniband
+  gather_facts: false
+  tags: TC_010
+  connection: local
+  vars_files:
+    - test_vars/test_infiniband_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml    
+  tasks:
+     - name: Making global config incorrect
+       lineinfile:
+        dest: "{{ ib_vars_dir }}"
+        insertafter: 'mellanox_switch_config:'
+        line: "gibberish inserted"
+        
+     - name: Executing network_ib role
+       vars:
+        inventory_name: "{{ ib_inventory_name }}"
+        host_name: "{{ ib_host_name }}"
+        template_name: "{{ template_value }}"
+        job_template_name: "{{ job_name }}"
+        playbook_path: "{{ ib_playbook_path }}"
+        delete_status: true
+       include_tasks: "{{ awx_script_path }}"
+
+     - block:
+        - name: Validate role exec output
+          assert:
+            that:
+              - ib_fail_msg in job_status.status
+            success_msg: "{{ success_message }}"
+            fail_msg: "{{ fail_case }}"
+      
+# Testcase OMNIA_1.1_IB_TC_009
+# Execute infiniband.yml with only interface and no Global configs in ib_config.yml
+- name: OMNIA_1.1_IB_TC_009
+  hosts: infiniband
+  gather_facts: false
+  tags: TC_009
+  connection: local
+  vars_files:
+    - test_vars/test_infiniband_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+     - name: Removing global config from ib_vars.yml
+       lineinfile:
+        dest: "{{ ib_vars_dir }}"
+        state: absent
+        regexp: "^gibberish inserted"
+        
+     - name: Executing network_ib role
+       vars:
+        inventory_name: "{{ ib_inventory_name }}"
+        host_name: "{{ ib_host_name }}"
+        template_name: "{{ template_value }}"
+        job_template_name: "{{ job_name }}"
+        playbook_path: "{{ ib_playbook_path }}"
+        delete_status: true
+       include_tasks: "{{ awx_script_path }}"
+
+     - name: Validate role exec output
+       assert:
+         that:
+           - ib_success_msg in job_status.status
+         success_msg: "{{ success_message }}"
+         fail_msg: "{{ fail_case }}"
+       changed_when: false
+       
+# Testcase OMNIA_1.1_IB_TC_011
+# Execute infiniband.yml with valid Global and incorrect interface configs in ib_config.yml
+- name: OMNIA_1.1_IB_TC_011
+  hosts: infiniband
+  gather_facts: false
+  tags: TC_011
+  connection: local
+  vars_files:
+    - test_vars/test_infiniband_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+     - name: Make interface config incorrect
+       lineinfile:
+        dest: "{{ ib_vars_dir }}"
+        insertafter: "{{ line_to_search }}"
+        line: "gibberish inserted"
+        
+     - name: Executing network_ib role
+       vars:
+        inventory_name: "{{ ib_inventory_name }}"
+        host_name: "{{ ib_host_name }}"
+        template_name: "{{ template_value }}"
+        job_template_name: "{{ job_name }}"
+        playbook_path: "{{ ib_playbook_path }}"
+        delete_status: true
+       include_tasks: "{{ awx_script_path }}"
+ 
+     - name: Validate role exec output
+       assert:
+         that:
+           - ib_fail_msg in job_status.status
+         success_msg: "{{ success_message }}"
+         fail_msg: "{{ fail_case }}"
+      
+# Testcase OMNIA_1.1_IB_TC_008
+# Execute infiniband.yml with only Global and no interface configs in ib_config.yml
+- name: OMNIA_1.1_IB_TC_008
+  hosts: infiniband
+  gather_facts: false
+  tags: TC_008
+  connection: local
+  vars_files:
+    - test_vars/test_infiniband_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+     - name: Removing interface config
+       ansible.builtin.command: sed -i '49,196d' "{{ ib_vars_dir }}"
+       args:
+        warn: no
+       changed_when: false
+     
+     - name: Executing network_ib role
+       vars:
+        inventory_name: "{{ ib_inventory_name }}"
+        host_name: "{{ ib_host_name }}"
+        template_name: "{{ template_value }}"
+        job_template_name: "{{ job_name }}"
+        playbook_path: "{{ ib_playbook_path }}"
+        delete_status: true
+       include_tasks: "{{ awx_script_path }}"
+
+     - name: Validate role exec output
+       assert:
+         that:
+           - ib_success_msg in job_status.status
+         success_msg: "{{ success_message }}"
+         fail_msg: "{{ fail_case }}"
+       changed_when: false
+
+     - name: Restore original ib_vars file
+       copy:
+         src: "{{ ib_vars_backup_dir }}"
+         dest: "{{ ib_vars_dir }}"
+         mode: "{{ file_perm }}"
+         
+     - name: Set save_changes_to_startup to True
+       ansible.builtin.replace:
+         dest: "{{ ib_vars_dir }}"
+         regexp: 'save_changes_to_startup: false'
+         replace: 'save_changes_to_startup: True'
+                          
+     - name: Execute network_ib role to set default IB config as is
+       vars:
+        inventory_name: "{{ ib_inventory_name }}"
+        host_name: "{{ ib_host_name }}"
+        template_name: "{{ template_value }}"
+        job_template_name: "{{ job_name }}"
+        playbook_path: "{{ ib_playbook_path }}"
+        delete_status: true
+       include_tasks: "{{ awx_script_path }}"
+       
+     - name: Set save_changes_to_startup back to false
+       ansible.builtin.replace:
+         dest: "{{ ib_vars_dir }}"
+         regexp: 'save_changes_to_startup: True'
+         replace: 'save_changes_to_startup: false'

+ 156 - 0
control_plane/test/test_infiniband_facts.yml

@@ -0,0 +1,156 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Testcase OMNIA_1.1_IB_TC_002
+# Execute ib_facts.yml with valid IP and valid credentials
+- name: OMNIA_1.1_IB_TC_002
+  hosts: infiniband
+  gather_facts: false
+  connection: local
+  vars_files:
+    - test_vars/test_infiniband_vars.yml
+    - ../roles/webui_awx/vars/main.yml
+    - ../input_params/base_vars.yml
+  tasks: 
+     - name: Execute ib_facts
+       vars:
+        inventory_name: "{{ ib_inventory_name }}"
+        host_name: "{{ ib_host_name }}"
+        template_name: "{{ fact_template_value }}"
+        job_template_name: "{{ fact_job_name }}"
+        playbook_path: "{{ ib_facts_playbook_path }}"
+        delete_status: true
+       include_tasks: "{{ awx_script_path }}"
+       
+     - block:
+        - name: Validate default flow with valid IP and valid credentials
+          assert:
+            that:
+              - ib_success_msg in job_status.status
+            success_msg: "{{ success_message }}"
+            fail_msg: "{{ fail_case }}"
+          changed_when: false
+
+# Testcase OMNIA_1.1_IB_TC_003
+# Execute ib_facts.yml with Invalid IP in inventory group
+- name: OMNIA_1.1_IB_TC_003
+  hosts: infiniband
+  gather_facts: false
+  tags: TC_003
+  connection: local
+  vars_files:
+    - test_vars/test_infiniband_vars.yml
+    - ../roles/webui_awx/vars/main.yml
+    - ../input_params/base_vars.yml
+  tasks:
+     - name: setting ip
+       set_fact:
+         ib_host_name: "{{ random_ip }}"
+    
+     - name: Execute ib_facts
+       vars:
+        inventory_name: "{{ ib_inventory_name }}"
+        host_name: "{{ ib_host_name }}"
+        template_name: "{{ fact_template_value }}"
+        job_template_name: "{{ fact_job_name }}"
+        playbook_path: "{{ ib_facts_playbook_path }}"
+        delete_status: true
+       include_tasks: "{{ awx_script_path }}"
+
+     - block:
+        - name: Validate invalid IP and valid credentials
+          assert:
+            that:
+              - ib_fail_msg in job_status.status
+            success_msg: "{{ success_message }}"
+            fail_msg: "{{ fail_case }}"
+          changed_when: false
+          failed_when: false
+      
+# Testcase OMNIA_1.1_IB_TC_001
+# Execute ib_facts.yml with no hosts in inventory
+- name: OMNIA_1.1_IB_TC_001
+  hosts: infiniband
+  gather_facts: false
+  tags: TC_001
+  connection: local
+  vars_files:
+    - test_vars/test_infiniband_vars.yml
+    - ../roles/webui_awx/vars/main.yml
+    - ../input_params/base_vars.yml
+  tasks:
+     - name: Execute ib_facts with no host details
+       vars:
+        inventory_name: "{{ ib_inventory_name }}"
+        template_name: "{{ fact_template_value }}"
+        job_template_name: "{{ fact_job_name }}"
+        playbook_path: "{{ ib_facts_playbook_path }}"
+        delete_status: true
+       include_tasks: "{{ awx_script_path }}"
+  
+     - block:
+        - name: Validate no hosts and valid credentials
+          assert:
+            that:
+              - ib_success_msg in job_status.status
+            success_msg: "{{ success_message }}"
+            fail_msg: "{{ fail_case }}"
+          changed_when: false
+
+# Testcase OMNIA_1.1_IB_TC_004
+# Execute ib_facts.yml with valid IP and incorrect credentials
+- name: OMNIA_1.1_IB_TC_004
+  hosts: infiniband
+  gather_facts: false
+  tags: TC_004
+  connection: local
+  vars_files:
+    - test_vars/test_infiniband_vars.yml
+    - ../roles/webui_awx/vars/main.yml
+    - ../input_params/base_vars.yml
+  tasks:
+     - name: Making infiniband_credentials invalid
+       tower_credential:
+         name: "infiniband_credential"
+         credential_type: "Network"
+         inputs:
+           username: "{{ invalid_username }}"
+         
+     - name: Execute ib_facts
+       vars:
+        inventory_name: "{{ ib_inventory_name }}"
+        host_name: "{{ ib_host_name }}"
+        template_name: "{{ fact_template_value }}"
+        job_template_name: "{{ fact_job_name }}"
+        playbook_path: "{{ ib_facts_playbook_path }}"
+        delete_status: true
+       include_tasks: "{{ awx_script_path }}"
+
+     - block:
+        - name: Validate valid IP and invalid credentials
+          assert:
+            that:
+              - ib_fail_msg in job_status.status
+            success_msg: "{{ success_message }}"
+            fail_msg: "{{ fail_case }}"
+          changed_when: false
+  
+     - name: Set credentials back to default
+       tower_credential:
+         name: "infiniband_credential"
+         credential_type: "Network"
+         inputs:
+           username: "{{ username }}"
+           password: "{{ password }}"

+ 754 - 0
control_plane/test/test_inventory.yml

@@ -0,0 +1,754 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---         
+# Test case to verify the prerequisites are installed and execute the AWX deployment
+- name: OMNIA_1.1_AWX_TC_001    
+  hosts: localhost
+  connection: local
+  vars_files:
+   - test_vars/test_inventory_vars.yml
+  tasks:
+     
+   - name: Check login_vars file is encrypted
+     command: cat "{{ login_vars_path }}"
+     changed_when: false
+     register: config_content
+     tags: always
+      
+   - name: Decrypt login_vars.yml
+     command: >-
+       ansible-vault decrypt {{ login_vars_path }}
+       --vault-password-file {{ login_vars_vault_path }}
+     changed_when: false
+     when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+     tags: always
+
+   - name: Include variable file login_vars.yml
+     include_vars: "{{ login_vars_path }}"
+     tags: always
+       
+   - name: Encrypt login file
+     command: >-
+       ansible-vault encrypt {{ login_vars_path }}
+       --vault-password-file {{ login_vars_vault_path }}
+     changed_when: false
+     tags: always
+                     
+   - name: Execute awx command
+     command: "kubectl get pods -n {{ awx_namespace }}"
+     changed_when: true
+     register: k8s_pods
+     run_once: true
+     ignore_errors: true
+     tags: TC_001,VERIFY_OMNIA_01    
+     
+   - name: Validate awx operator containers
+     assert:
+      that:  
+       -  k8s_pods.stdout | regex_search("{{ item }}")
+      fail_msg: "{{ awx_fail_msg }}"
+      success_msg: "{{ awx_success_msg }}"
+     loop: 
+       - "awx-([A-Za-z0-9]{10})-([A-Za-z0-9]{5})"
+       - "awx-operator-([A-Za-z0-9]{10})-([A-Za-z0-9]{5})"
+       - "awx-postgres-([A-Za-z0-9]{1})"
+     run_once: true
+     tags: TC_001,VERIFY_OMNIA_01       
+            
+# Test case to verify inventory groups are present in AWX UI (idrac, ethernet, infiniband, rbod)
+- name: OMNIA_1.1_AWX_TC_003   
+  hosts: localhost
+  connection: local
+  
+  vars_files:
+   - test_vars/test_inventory_vars.yml
+  tasks:                 
+
+   - name: Execute get pods command
+     command: "kubectl get pods -n {{ awx_namespace }}"
+     changed_when: true
+     register: k8s_pods
+     run_once: true
+     ignore_errors: true
+     tags: TC_003     
+     
+   - name: Get awx pod 
+     set_fact:
+      awx_pods: "{{ item | regex_search(awx_pod_regex) | trim  }}"
+      idrac_status: true
+     with_items: 
+       - "{{ k8s_pods.stdout_lines }}"
+     run_once: true
+     when: item | regex_search(awx_pod_item_regex)
+     tags: TC_003
+
+   - name: Get awx cluster ip
+     shell: "kubectl get svc awx-ui -n {{ awx_namespace }} -o jsonpath='{.spec.clusterIP}'"
+     register: awx_cluster_ip
+     changed_when: false
+     ignore_errors: true
+     tags: TC_003
+
+   - name: Get AWX admin password
+     shell: "kubectl get secret awx-admin-password -n {{ awx_namespace }} -o jsonpath='{.data.password}' | base64 --decode"
+     register: awx_admin_password
+     changed_when: false
+     ignore_errors: true
+     tags: TC_003
+          
+   - name: Execute awx get inventory hosts command
+     command: "awx --conf.host http://{{ awx_cluster_ip.stdout }}:8052 --conf.username admin --conf.password {{ awx_admin_password.stdout }} --conf.insecure hosts list --inventory {{ item }} -f human --filter 'name'"
+     register: idrac_hosts
+     with_items:
+      - "idrac_inventory"
+      - "infiniband_inventory"
+      - "ethernet_inventory"
+      - "powervault_me4_inventory"
+     run_once: true
+     changed_when: false
+     tags: TC_003        
+       
+   - name: Verify inventories are present in AWX UI
+     assert:
+      that: 
+       - item.stdout_lines[0] | regex_search("name")
+      fail_msg: "{{ item.item }} - {{ inventory_fail_msg }}"
+      success_msg: "{{ item.item }} - {{ inventory_success_msg }}"
+     with_items:
+      - "{{ idrac_hosts.results }}"
+     changed_when: false
+     tags: TC_003
+              
+# Test case to validate ip of idrac     
+- name: OMNIA_1.1_AWX_TC_004    
+  hosts: localhost
+  connection: local
+  
+  vars_files:
+   - test_vars/test_inventory_vars.yml
+   - ../input_params/base_vars.yml  
+  tasks:       
+      
+   - name: Execute get pods command
+     command: "kubectl get pods -n {{ awx_namespace }}"
+     changed_when: true
+     register: k8s_pods
+     run_once: true
+     ignore_errors: true
+     tags: TC_004     
+     
+   - name: Get awx pod 
+     set_fact:
+      awx_pods: "{{ item | regex_search(awx_pod_regex) | trim  }}"
+     with_items: 
+       - "{{ k8s_pods.stdout_lines }}"
+     run_once: true
+     when: item | regex_search(awx_pod_item_regex)
+     changed_when: false
+     tags: TC_004
+
+   - name: Get awx cluster ip
+     shell: "kubectl get svc awx-ui -n {{ awx_namespace }} -o jsonpath='{.spec.clusterIP}'"
+     register: awx_cluster_ip
+     changed_when: false
+     tags: TC_004
+
+   - name: Get AWX admin password
+     shell: "kubectl get secret awx-admin-password -n {{ awx_namespace }} -o jsonpath='{.data.password}' | base64 --decode"
+     register: awx_admin_password
+     changed_when: false
+     ignore_errors: true
+     tags: TC_004
+          
+   - name: Execute awx get inventory hosts command
+     command: "awx --conf.host http://{{ awx_cluster_ip.stdout }}:8052 --conf.username admin --conf.password {{ awx_admin_password.stdout }} --conf.insecure hosts list --inventory {{ idrac_inventory_name }} -f human --filter 'name'"
+     changed_when: true
+     register: idrac_hosts
+     run_once: true
+     tags: TC_004     
+     
+   - name: List of iDRAC host
+     include_tasks: "{{ validation_script_path }}"
+     with_items:
+      - "{{ idrac_hosts.stdout_lines[2:] }}"
+     when: idrac_hosts.stdout_lines | length > 2
+     ignore_errors: true
+     tags: TC_004
+     
+   - name: Empty iDRAC hosts
+     debug:
+      msg: "{{ empty_host_err }}"
+     when: idrac_hosts.stdout_lines | length < 3
+     failed_when: false
+     tags: TC_004     
+
+# Test case to validate ip of infiniband
+- name: OMNIA_1.1_AWX_TC_005    
+  hosts: localhost
+  connection: local
+  
+  vars_files:
+   - test_vars/test_inventory_vars.yml
+   - ../input_params/base_vars.yml  
+  tasks:                 
+
+   - name: Execute get pods command
+     command: "kubectl get pods -n {{ awx_namespace }}"
+     changed_when: true
+     register: k8s_pods
+     run_once: true
+     ignore_errors: true
+     tags: TC_005     
+     
+   - name: Get awx pod 
+     set_fact:
+      awx_pods: "{{ item | regex_search(awx_pod_regex) | trim  }}"
+     with_items: 
+       - "{{ k8s_pods.stdout_lines }}"
+     run_once: true
+     when: item | regex_search(awx_pod_item_regex)
+     failed_when: false
+     tags: TC_005
+
+   - name: Get awx cluster ip
+     shell: "kubectl get svc awx-ui -n {{ awx_namespace }} -o jsonpath='{.spec.clusterIP}'"
+     register: awx_cluster_ip
+     changed_when: false
+     tags: TC_005
+
+   - name: Get AWX admin password
+     shell: "kubectl get secret awx-admin-password -n {{ awx_namespace }} -o jsonpath='{.data.password}' | base64 --decode"
+     register: awx_admin_password
+     changed_when: false
+     ignore_errors: true
+     tags: TC_005
+          
+   - name: Execute awx get inventory hosts command
+     command: "awx --conf.host http://{{ awx_cluster_ip.stdout }}:8052 --conf.username admin --conf.password {{ awx_admin_password.stdout }} --conf.insecure hosts list --inventory {{ ib_inventory_name }} -f human --filter 'name'"
+     changed_when: true
+     register: infiniband_hosts
+     run_once: true
+     ignore_errors: true
+     tags: TC_005     
+     
+   - name: List of infiniband hosts
+     include_tasks: "{{ validation_script_path }}"  
+     with_items:
+      - "{{ infiniband_hosts.stdout_lines[2:] }}"
+     when: infiniband_hosts.stdout_lines | length > 2
+     ignore_errors: true
+     tags: TC_005
+     
+   - name: Empty infiniband hosts
+     debug:
+      msg: "{{ empty_host_err }}"
+     when: infiniband_hosts.stdout_lines | length < 3
+     failed_when: false
+     tags: TC_005 
+
+# Test case to validate ip of ethernet
+- name: OMNIA_1.1_AWX_TC_006    
+  hosts: localhost
+  connection: local
+  
+  vars_files:
+   - test_vars/test_inventory_vars.yml
+   - ../input_params/base_vars.yml  
+  tasks:                 
+
+   - name: Execute get pods command
+     command: "kubectl get pods -n {{ awx_namespace }}"
+     changed_when: true
+     register: k8s_pods
+     run_once: true
+     ignore_errors: true
+     tags: TC_006     
+     
+   - name: Get awx pod 
+     set_fact:
+      awx_pods: "{{ item | regex_search(awx_pod_regex) | trim  }}"
+     with_items: 
+       - "{{ k8s_pods.stdout_lines }}"
+     run_once: true
+     when: item | regex_search(awx_pod_item_regex)
+     failed_when: false
+     tags: TC_006
+
+   - name: Get awx cluster ip
+     shell: "kubectl get svc awx-ui -n {{ awx_namespace }} -o jsonpath='{.spec.clusterIP}'"
+     register: awx_cluster_ip
+     changed_when: false
+     tags: TC_006
+
+   - name: Get AWX admin password
+     shell: "kubectl get secret awx-admin-password -n {{ awx_namespace }} -o jsonpath='{.data.password}' | base64 --decode"
+     register: awx_admin_password
+     changed_when: false
+     ignore_errors: true
+     tags: TC_006
+          
+   - name: Execute awx get inventory hosts command
+     command: "awx --conf.host http://{{ awx_cluster_ip.stdout }}:8052 --conf.username admin --conf.password {{ awx_admin_password.stdout }} --conf.insecure hosts list --inventory {{ ethernet_inventory_name }} -f human --filter 'name'"
+     changed_when: true
+     register: ethernet_hosts
+     run_once: true
+     ignore_errors: true
+     tags: TC_006     
+     
+   - name: List of ethernet hosts   
+     include_tasks: "{{ validation_script_path }}" 
+     with_items:
+      - "{{ ethernet_hosts.stdout_lines[2:] }}"
+     when: ethernet_hosts.stdout_lines | length > 2
+     ignore_errors: true
+     tags: TC_006
+     
+   - name: Empty ethernet hosts
+     debug:
+      msg: "{{ empty_host_err }}"
+     when: ethernet_hosts.stdout_lines | length < 3
+     failed_when: false
+     tags: TC_006
+      
+# Test case to validate ip of powervault      
+- name: OMNIA_1.1_AWX_TC_007    
+  hosts: localhost
+  connection: local
+  
+  vars_files:
+   - test_vars/test_inventory_vars.yml
+   - ../input_params/base_vars.yml  
+  tasks:                 
+
+   - name: Execute get pods command
+     command: "kubectl get pods -n {{ awx_namespace }}"
+     changed_when: true
+     register: k8s_pods
+     run_once: true
+     ignore_errors: true
+     tags: TC_007     
+     
+   - name: Get awx pod 
+     set_fact:
+      awx_pods: "{{ item | regex_search(awx_pod_regex) | trim  }}"
+      idrac_status: true
+     with_items: 
+       - "{{ k8s_pods.stdout_lines }}"
+     run_once: true
+     when: item | regex_search(awx_pod_item_regex)
+     failed_when: false
+     tags: TC_007
+
+   - name: Get awx cluster ip
+     shell: "kubectl get svc awx-ui -n {{ awx_namespace }} -o jsonpath='{.spec.clusterIP}'"
+     register: awx_cluster_ip
+     changed_when: false
+     tags: TC_007
+
+   - name: Get AWX admin password
+     shell: "kubectl get secret awx-admin-password -n {{ awx_namespace }} -o jsonpath='{.data.password}' | base64 --decode"
+     register: awx_admin_password
+     changed_when: false
+     ignore_errors: true
+     tags: TC_007
+          
+   - name: Execute awx get inventory hosts command
+     command: "awx --conf.host http://{{ awx_cluster_ip.stdout }}:8052 --conf.username admin --conf.password {{ awx_admin_password.stdout }} --conf.insecure hosts list --inventory {{ pv_inventory_name }} -f human --filter 'name'"
+     changed_when: true
+     register: powervault_hosts
+     run_once: true
+     ignore_errors: true
+     tags: TC_007     
+     
+   - name: List of powervault hosts
+     include_tasks: "{{ validation_script_path }}"  
+     with_items:
+      - "{{ powervault_hosts.stdout_lines[2:] }}"
+     when: powervault_hosts.stdout_lines | length > 2
+     ignore_errors: true
+     tags: TC_007
+     
+   - name: Empty powervault hosts
+     debug:
+      msg: "{{ empty_host_err }}"
+     when: powervault_hosts.stdout_lines | length < 3
+     failed_when: false
+     tags: TC_007
+
+# Test case to verify omnia inventory groups (manager, compute, login, nfs)                        
+- name: OMNIA_1.1_AWX_TC_008   
+  hosts: localhost
+  connection: local
+  vars_files:
+   - test_vars/test_inventory_vars.yml
+   - ../roles/webui_awx/vars/main.yml
+    
+  tasks:                 
+
+    - name: Get awx-service Cluster-IP
+      command: "kubectl get svc awx-service -n {{ awx_namespace }} -o jsonpath='{.spec.clusterIP}'"
+      register: awx_cluster_ip
+      changed_when: false
+      run_once: true
+      ignore_errors: true
+      tags: TC_008
+    
+    - name: Get AWX admin password
+      shell: "kubectl get secret awx-admin-password -n {{ awx_namespace }} -o jsonpath='{.data.password}' | base64 --decode"
+      register: awx_admin_password
+      changed_when: false
+      run_once: true
+      ignore_errors: true
+      tags: TC_008
+         
+    - name: Set IP and password
+      set_fact:
+        awx_ip: 'http://{{ awx_cluster_ip.stdout }}'
+        admin_password: "{{ awx_admin_password.stdout }}"
+      run_once: true
+      failed_when: false
+      tags: TC_008
+       
+    - name: Get omnia inventory groups
+      awx.awx.tower_group:
+        name: "{{ item.name }}"
+        description: "{{ item.description }}"
+        inventory: "node_inventory"
+        state: present
+      loop: "{{ group_names }}"
+      register: awx_group
+      run_once: true
+      ignore_errors: true
+      tags: TC_008
+     
+    - name: Verify omnia inventory groups
+      assert:
+       that: 
+         - item.changed == false
+         - item.item.name == "{{ manager_group }}" or 
+           item.item.name =="{{ compute_group }}" or 
+           item.item.name == "{{ login_group }}" or 
+           item.item.name == "{{ nfs_group }}"
+       fail_msg: "{{ item .item.name }}{{ group_fail_msg }}"
+       success_msg: "{{ item .item.name }}{{ group_success_msg }}"
+      with_items:
+       - "{{ awx_group.results }}"
+      failed_when: false
+      tags: TC_008
+      
+# Test case to verify AWX configuration is done properly with job_templates, schedules in place      
+- name: OMNIA_1.1_AWX_TC_009   
+  hosts: localhost
+  connection: local
+  vars_files:
+   - test_vars/test_inventory_vars.yml
+   - ../roles/webui_awx/vars/main.yml
+    
+  tasks:                 
+
+    - name: Get awx-service Cluster-IP
+      command: "kubectl get svc awx-service -n {{ awx_namespace }} -o jsonpath='{.spec.clusterIP}'"
+      register: awx_cluster_ip
+      changed_when: false
+      ignore_errors: true
+      run_once: true
+      tags: TC_009
+    
+    - name: Get AWX admin password
+      shell: "kubectl get secret awx-admin-password -n {{ awx_namespace }} -o jsonpath='{.data.password}' | base64 --decode"
+      register: awx_admin_password
+      changed_when: false
+      ignore_errors: true
+      run_once: true
+      tags: TC_009
+           
+    - name: Set IP and password
+      set_fact:
+        awx_ip: 'http://{{ awx_cluster_ip.stdout }}'
+        admin_password: "{{ awx_admin_password.stdout }}"
+      failed_when: false
+      run_once: true
+      tags: TC_009   
+      
+    - name: Get job template details
+      awx.awx.tower_job_template:
+        name: "{{ item.name }}"
+        job_type: "run"
+        organization: "{{ organization_name }}"
+        inventory: "{{ item.inventory }}"
+        project: "{{ project_name }}"
+        playbook: "{{ item.playbook }}"
+        credentials:
+          - "{{ item.credential }}"
+        state: present
+        tower_config_file: "{{ tower_config_file_path }}"
+      loop: "{{ job_template_details }}"
+      register: job_template
+      when: item.flag
+      ignore_errors: true
+      tags: TC_009  
+
+    - name: Validate job template 
+      assert:
+       that: 
+         - item.changed == false
+       fail_msg: "{{ item.item.name }}{{ job_template_fail_msg }}"
+       success_msg: " {{ item.item.name }} {{ job_template_success_msg }}"
+      with_items:
+        - "{{ job_template.results }}"
+      failed_when: false
+      when: item.item.flag
+      tags: TC_009  
+      
+    - name: Build a schedule for job template
+      awx.awx.tower_schedule:
+        name: "{{ item.name }}"
+        unified_job_template: "{{ item.template }}"
+        rrule: "{{ schedule_rule }}"
+        state: present
+        tower_config_file: "{{ tower_config_file_path }}"
+      register: schedule
+      loop: "{{ scheduled_templates }}"
+      failed_when: false
+      run_once: true
+      tags: TC_009
+      
+    - name: Validate schedule status
+      assert:
+       that: 
+         - schedule.changed == false
+       fail_msg: "{{ schedule_fail_msg }}"
+       success_msg: "{{ schedule_success_msg }}"
+      failed_when: false
+      tags: TC_009 
+
+
+# Test case to verify updation of new node in omnia inventory
+- name: OMNIA_1.1_AWX_TC_010   
+  hosts: localhost
+  connection: local
+  
+  vars_files:
+   - test_vars/test_inventory_vars.yml
+   - ../input_params/base_vars.yml  
+  tasks:                 
+
+    - name: Execute get pods command
+      command: "kubectl get pods -n {{ awx_namespace }}"
+      changed_when: true
+      register: k8s_pods
+      run_once: true
+      ignore_errors: true
+      tags: TC_010
+          
+    - name: Get awx pod 
+      set_fact:
+       awx_pods: "{{ item | regex_search(awx_pod_regex) | trim  }}"
+      with_items: 
+        - "{{ k8s_pods.stdout_lines }}"
+      run_once: true
+      when: item | regex_search(awx_pod_item_regex)
+      failed_when: false
+      tags: TC_010
+
+    - name: Get AWX admin password
+      shell: "kubectl get secret awx-admin-password -n {{ awx_namespace }} -o jsonpath='{.data.password}' | base64 --decode"
+      register: awx_admin_password
+      changed_when: false
+      ignore_errors: true
+      tags: TC_010
+          
+    - name: Execute awx get inventory hosts command
+      command: "awx --conf.host {{ awx_host }} --conf.username admin --conf.password {{ awx_admin_password.stdout }} --conf.insecure hosts list --inventory {{ node_inventory_name }} -f human --filter 'name'"
+      changed_when: true
+      register: node_hosts
+      run_once: true
+      failed_when: false
+      tags: TC_010
+         
+    - name: Get node_inventory hosts
+      command: ping -c1 {{ item }}
+      delegate_to: localhost
+      register: ping_result
+      ignore_errors: yes
+      changed_when: false
+      with_items:
+      - "{{ node_hosts.stdout_lines[2:] }}"
+      when: node_hosts.stdout_lines | length > 2
+      tags: TC_010
+            
+    - name: Verify updation of new node
+      assert:
+       that: 
+         - "'100% packet loss' not in item.stdout"
+       fail_msg: "{{ node_fail_msg }}"
+       success_msg: "{{ node_success_msg }}"
+      with_items:
+       - "{{ ping_result.results }}"
+      when: node_hosts.stdout_lines | length > 2
+      failed_when: false
+      tags: TC_010
+
+    - name: Empty node hosts
+      debug:
+       msg: "{{ empty_host_err }}"
+      when: node_hosts.stdout_lines | length < 3
+      tags: TC_010
+          
+# Test case to verify AWX configuration is done properly with all items in place      
+- name: OMNIA_1.1_AWX_TC_011   
+  hosts: localhost
+  connection: local
+  vars_files:
+   - test_vars/test_inventory_vars.yml
+   - ../roles/webui_awx/vars/main.yml
+    
+  tasks:                 
+
+    - name: Get awx-service Cluster-IP
+      command: "kubectl get svc awx-service -n {{ awx_namespace }} -o jsonpath='{.spec.clusterIP}'"
+      register: awx_cluster_ip
+      changed_when: false
+      ignore_errors: true
+      run_once: true
+      tags: TC_011
+    
+    - name: Get AWX admin password
+      shell: "kubectl get secret awx-admin-password -n {{ awx_namespace }} -o jsonpath='{.data.password}' | base64 --decode"
+      register: awx_admin_password
+      changed_when: false
+      ignore_errors: true
+      run_once: true
+      tags: TC_011
+           
+    - name: Set IP and password
+      set_fact:
+        awx_ip: 'http://{{ awx_cluster_ip.stdout }}'
+        admin_password: "{{ awx_admin_password.stdout }}"
+      run_once: true
+      tags: TC_011
+
+    - name: Get organization details
+      awx.awx.tower_organization:
+        name: "{{ organization_name }}"
+        description: "{{ org_description }}"
+        state: present
+      register: organization
+      ignore_errors: true
+      run_once: true
+      tags: TC_011
+      
+    - name: Validate an organization
+      assert:
+       that: 
+         - organization.changed == false
+       fail_msg: "{{ organization_fail_msg }}"
+       success_msg: "{{ organization_success_msg }}"
+      failed_when: false
+      tags: TC_011
+             
+    - name: Get tower inventory details
+      awx.awx.tower_inventory:
+        name: "{{ item.name }}"
+        description: "{{ item.description }}"
+        organization: "{{ organization_name }}"
+        state: present
+      loop: "{{ inventory_names }}"  
+      register: inventory 
+      when: item.flag
+      ignore_errors: true
+      run_once: true
+      tags: TC_011 
+
+    - name: Validate inventory status
+      assert:
+       that: 
+         - item.changed == false
+       fail_msg: "{{ inventory_fail_msg }}"
+       success_msg: "{{ inventory_success_msg }}"
+      with_items:
+       - "{{ inventory.results }}"
+      failed_when: false
+      tags: TC_011   
+      
+    - name: Get job template details
+      awx.awx.tower_job_template:
+        name: "{{ item.name }}"
+        job_type: "run"
+        organization: "{{ organization_name }}"
+        inventory: "{{ item.inventory }}"
+        project: "{{ project_name }}"
+        playbook: "{{ item.playbook }}"
+        credentials:
+          - "{{ item.credential }}"
+        state: present
+      loop: "{{ job_template_details }}"
+      register: job_template
+      when: item.flag
+      ignore_errors: true
+      run_once: true
+      tags: TC_011  
+
+    - name: Validate job template 
+      assert:
+       that: 
+         - item.changed == false
+       fail_msg: "{{ item.item.name }} {{ job_template_fail_msg }}"
+       success_msg: "{{ item.item.name }} {{ job_template_success_msg }}"
+      with_items:
+        - "{{ job_template.results }}"
+      failed_when: false
+      when: item.item.flag
+      tags: TC_011 
+      
+    - name: Get project details
+      awx.awx.tower_project:
+        name: "{{ project_name }}"
+        description: "{{ project_description }}"
+        organization: "{{ organization_name }}"
+        state: present
+      register: project
+      ignore_errors: true
+      run_once: true
+      tags: TC_011 
+            
+    - name: Verify project 
+      assert:
+       that: 
+         - project.changed == false
+       fail_msg: "{{ project_fail_msg }}"
+       success_msg: "{{ project_success_msg }}"
+      failed_when: false
+      tags: TC_011 
+      
+    - name: Build a schedule for job template
+      awx.awx.tower_schedule:
+        name: "{{ item.name }}"
+        unified_job_template: "{{ item.template }}"
+        rrule: "{{ schedule_rule }}"
+        state: present
+      register: schedule
+      loop: "{{ scheduled_templates }}"
+      failed_when: false
+      run_once: true
+      tags: TC_011
+      
+    - name: Validate schedule status
+      assert:
+       that: 
+         - schedule.changed == false
+       fail_msg: "{{ schedule_fail_msg }}"
+       success_msg: "{{ schedule_success_msg }}"
+      failed_when: false
+      tags: TC_011

+ 147 - 0
control_plane/test/test_inventory_validation.yml

@@ -0,0 +1,147 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+- block: 
+      
+   - name: Get iDRAC IP details
+     dellemc.openmanage.idrac_system_info:
+      idrac_ip: "{{ item | trim }}"
+      idrac_user: "{{ idrac_username }}"
+      idrac_password: "{{ idrac_password }}"
+     register: idrac_ip
+     run_once: true
+     ignore_errors: yes 
+     tags: "TC_004"   
+
+   - name: Validate iDRAC IP
+     assert:
+      that: 
+        - idrac_ip.system_info.iDRACNIC[0].ProductInfo == idrac_search_key
+      fail_msg: "{{ item | trim }} {{ idrac_fail_msg }}"
+      success_msg: "{{ item | trim }} {{ idrac_success_msg }}"
+     tags: "TC_004" 
+
+   - name: Authenticate infiniband Switch
+     uri:
+       url: http://{{ item | trim }}/admin/launch?script=rh&template=login&action=login
+       method: POST
+       body_format: form-urlencoded
+       body:
+         f_user_id: "{{ ib_username }}"
+         f_password: "{{ ib_password }}"
+         enter: Sign in
+       status_code: "{{ infiniband_status_code }}"
+     register: login
+     ignore_errors: yes
+     run_once: true
+     tags: "TC_005"  
+          
+   - name: Assert infiniband switch
+     uri:
+       url: http://{{ item | trim }}/admin/launch?script=json
+       method: POST
+       body_format: json
+       headers:
+         Cookie: "{{ login.set_cookie.split(';')[0] }}"
+       body:
+         {
+         "commands":
+          [
+            "show version"
+          ]
+         }
+     register: infinibandswitch_info
+     when: login.failed == false  or 'set_cookie' not in login
+     ignore_errors: yes
+     run_once: true
+     tags: "TC_005"
+              
+   - name: Validate infiniband  IP
+     assert:
+      that: 
+        - infinibandswitch_info.json.data['Product name'] == infiniband_search_key
+      fail_msg: "{{ infiniband_fail_msg }}"
+      success_msg: "{{ infiniband_success_msg }}"
+     ignore_errors: yes
+     tags: "TC_005"
+          
+   - name: Get ethernet IP details
+     dellos10_command:
+       provider:
+         host: "{{ item | trim }}"
+         username: "{{ ethernet_switch_username }}"
+         password: "{{ ethernet_switch_password }}"
+       commands: ['show version']
+     register: ethernet_info
+     ignore_errors: yes
+     run_once: true
+     when: ethernet_switch_support
+     tags: "TC_006"
+
+   - name: Validate ethernet  IP
+     assert:
+      that: 
+        - ethernet_info.stdout | regex_search(ethernet_search_key)
+      fail_msg: " {{ item | trim }} {{ ethernet_fail_msg }}"
+      success_msg: "{{ item | trim }} {{ ethernet_success_msg }}"
+     tags: "TC_006"
+           
+   - name: Get auth string for powervault
+     shell: echo -n {{ powervault_me4_username }}_{{powervault_me4_password}} | sha256sum
+     changed_when: false
+     when: powervault_support 
+     register: auth_string
+     ignore_errors: yes
+     run_once: true
+     tags: "TC_007"
+         
+   - name: Get session key for powervault
+     uri:
+       url: https://{{ item | trim }}/api/login/{{ auth_string.stdout | replace(" -", "") }}
+       method: GET
+       headers:
+         {'datatype': 'json'}
+       validate_certs: no
+     when: powervault_support 
+     register: session_key
+     ignore_errors: yes
+     run_once: true
+     tags: "TC_007"
+        
+   - name: Assert me4_powervault
+     uri:
+       url: https://{{ item | trim }}/api/show/system
+       method: GET
+       body_format: json
+       validate_certs: no
+       use_proxy: no
+       headers:
+         {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'} 
+     register: system_info
+     ignore_errors: yes
+     run_once: true
+     tags: "TC_007"
+
+   - name: Validate me4_powervault  IP
+     assert:
+      that: 
+        - "'{{ me4_powervault_search_key }}' in system_info.json.system[0]['scsi-product-id']"
+      fail_msg: "{{ item | trim }} {{ powervault_fail_msg }}"
+      success_msg: "{{ item | trim }} {{ powervault_success_msg }}"
+     ignore_errors: yes
+     tags: "TC_007"
+       
+  rescue:
+    - debug:
+       msg: "{{ failed_msg }}"

+ 150 - 16
control_plane/test/test_powervault.yml

@@ -204,6 +204,124 @@
         fail_msg: "{{ user_validation_failure_msg }}"
       tags: VERIFY_OMNIA_01
 
+# Testcase OMNIA_1.1_PV_TC_018
+# Test case to validate linear storage with linear pools and disk groups
+
+- name: OMNIA_1.1_PV_TC_018
+  hosts: powervault
+  connection: local
+  tags: TC_018
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_powervault_vars.yml
+    - ../input_params/powervault_me4_vars.yml
+    - ../input_params/base_vars.yml
+    - ../roles/webui_awx/vars/main.yml
+  tasks:
+
+    - name: Set powervault parameters
+      lineinfile:
+        path: "{{ powervault_me4_var_path }}"
+        regexp: "{{ item.regexp }}"
+        line: "{{ item.line }}"
+      loop:
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ linear_pool_type }}"' }
+        - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia }}"' }
+        - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
+        - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_1_2 }}"' }
+        - { regexp: '^powervault_me4_k8s_volume_name', line: 'powervault_me4_k8s_volume_name: "{{ volume_name_k8s_v1 }}"' }
+        - { regexp: '^powervault_me4_slurm_volume_name', line: 'powervault_me4_slurm_volume_name: "{{ volume_name_slurm_v1 }}"' }
+        - { regexp: '^powervault_me4_volume_size', line: 'powervault_me4_volume_size: "{{ volume_size_input_100gb }}"' }
+
+    - name: Execute powervault playbook using AWX collections
+      vars:
+        inventory_name: "{{ powervault_inventory_name }}"
+        template_name: "{{ template_value }}"
+        job_template_name: "{{ job_name }}"
+        playbook_path: "{{ powervault_playbook_path }}"
+        delete_status: false
+      include_tasks: "{{ awx_script_path }}"  
+
+    - name: Validate powervault playbook execution output
+      assert:
+        that:
+          - success_job_status in job_status.status
+        success_msg: "{{ playbook_exec_success_msg }}"
+        fail_msg: "{{ playbook_exec_fail_msg }}"
+      changed_when: false
+
+    - name: Execute show disk groups command
+      uri:
+        url: https://{{ inventory_hostname }}/api/show/disk-groups/pool/{{ disk_group_name_omnia }}/{{ disk_group_name_omnia }}
+        method: GET
+        body_format: json
+        validate_certs: no
+        use_proxy: no
+        headers:
+          {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
+      register: pools_diskgroup_info
+      tags: VERIFY_OMNIA_01
+
+    - name: Verify the pool and disk group additions
+      assert:
+        that:
+          - linear_pool_type in pools_diskgroup_info.json['disk-groups'][0]['storage-type']
+          - disk_group_name_omnia in pools_diskgroup_info.json['disk-groups'][0].name
+          - disk_group_name_omnia in pools_diskgroup_info.json['disk-groups'][0].pool
+          - disk_type_ssdsas in pools_diskgroup_info.json['disk-groups'][0]['disk-description']
+          - raid1_level in pools_diskgroup_info.json['disk-groups'][0].raidtype
+        success_msg: "{{ pool_diskgroup_config_success_msg }}"
+        fail_msg: "{{ pool_diskgroup_config_failure_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Execute show volumes command for k8s
+      uri:
+        url: https://{{ inventory_hostname }}/api/show/volumes/{{ volume_name_k8s_v1 }}
+        method: GET
+        body_format: json
+        validate_certs: no
+        use_proxy: no
+        headers:
+          {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
+      register: k8s_volumes_info
+      tags: VERIFY_OMNIA_01
+
+    - name: Execute show volumes command for slurm
+      uri:
+        url: https://{{ inventory_hostname }}/api/show/volumes/{{ volume_name_slurm_v1 }}
+        method: GET
+        body_format: json
+        validate_certs: no
+        use_proxy: no
+        headers:
+          {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
+      register: slurm_volumes_info
+      tags: VERIFY_OMNIA_01
+
+    - name: Verify the volumes creation
+      assert:
+        that:
+          - volume_name_k8s_v1 in k8s_volumes_info.json.volumes[0]['volume-name']
+          - volume_size_100gb in k8s_volumes_info.json.volumes[0].size
+          - volume_name_slurm_v1 in slurm_volumes_info.json.volumes[0]['volume-name']
+          - volume_size_100gb in slurm_volumes_info.json.volumes[0].size
+          - disk_group_name_omnia in k8s_volumes_info.json.volumes[0]['storage-pool-name']
+        success_msg: "{{ volumes_creation_success_msg }}"
+        fail_msg: "{{ volumes_creation_failure_msg }}"
+      tags: VERIFY_OMNIA_01
+   
+    - name: Delete the disk group created
+      uri:
+        url: https://{{ inventory_hostname }}/api/remove/disk-groups/{{ disk_group_name_omnia }}
+        method: GET
+        body_format: json
+        validate_certs: no
+        use_proxy: no
+        headers:
+          {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
+      register: delete_diskgroup
+
 # Testcase OMNIA_1.1_PV_TC_005
 # Test case to validate configuration of pools, disk group and storage volumes with SAS MDL disks, RAID 1 level, storage with rw access 
 
@@ -227,6 +345,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA01 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_5_6 }}"' }
@@ -240,7 +359,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -251,6 +370,15 @@
         fail_msg: "{{ playbook_exec_fail_msg }}"
       changed_when: false
 
+    - name: Get session key
+      uri:
+        url: https://{{ inventory_hostname }}/api/login/{{ auth_string.stdout | replace(" -", "") }}
+        method: GET
+        headers:
+          {'datatype': 'json'}
+        validate_certs: no
+      register: session_key
+
     - name: Execute show disk groups command
       uri:
         url: https://{{ inventory_hostname }}/api/show/disk-groups/pool/{{ pool_a }}/{{ disk_group_name_omnia_dgA01 }}
@@ -261,7 +389,6 @@
         headers:
           {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
       register: pools_diskgroup_info
-      tags: VERIFY_OMNIA_01
 
     - name: Verify the pool and disk group additions
       assert:
@@ -273,7 +400,6 @@
           - raid1_level in pools_diskgroup_info.json['disk-groups'][0].raidtype
         success_msg: "{{ pool_diskgroup_config_success_msg }}"
         fail_msg: "{{ pool_diskgroup_config_failure_msg }}"
-      tags: VERIFY_OMNIA_01
 
     - name: Execute show volumes command
       uri:
@@ -285,7 +411,6 @@
         headers:
           {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
       register: k8s_volumes_info
-      tags: VERIFY_OMNIA_01
 
     - name: Execute show volumes command
       uri:
@@ -297,7 +422,6 @@
         headers:
           {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
       register: slurm_volumes_info
-      tags: VERIFY_OMNIA_01
 
     - name: Verify the volumes creation
       assert:
@@ -309,7 +433,6 @@
           - pool_a in k8s_volumes_info.json.volumes[0]['storage-pool-name']
         success_msg: "{{ volumes_creation_success_msg }}"
         fail_msg: "{{ volumes_creation_failure_msg }}"
-      tags: VERIFY_OMNIA_01
 
 # Testcase OMNIA_1.1_PV_TC_004
 # Test case to validate the SNMP trap notification when level is set to none
@@ -368,6 +491,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA02 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_3_4 }}"' }
@@ -381,7 +505,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -415,6 +539,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA02 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_5_6 }}"' }
@@ -428,7 +553,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -462,6 +587,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA01 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_2_3 }}"' }
@@ -475,7 +601,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -509,6 +635,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA02 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_2_3 }}"' }
@@ -522,7 +649,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -587,6 +714,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA02 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_2_3 }}"' }
@@ -600,7 +728,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -665,6 +793,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA02 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid5_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_0_2 }}"' }
@@ -678,7 +807,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -765,6 +894,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_b }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgB01 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid6_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_7_10 }}"' }
@@ -778,7 +908,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -865,6 +995,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_b }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgB01 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid10_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_8_11 }}"' }
@@ -878,7 +1009,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -965,6 +1096,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_b }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgB01 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid_adapt_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_0_11 }}"' }
@@ -978,7 +1110,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -1036,6 +1168,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA02 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_2_3 }}"' }
@@ -1049,7 +1182,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -1145,6 +1278,7 @@
       loop:
         - { regexp: '^powervault_me4_snmp_notify_level', line: 'powervault_me4_snmp_notify_level: "{{ snmp_notify_level_crit }}"' }
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA02 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_2_3 }}"' }

+ 56 - 0
control_plane/test/test_vars/test_infiniband_vars.yml

@@ -0,0 +1,56 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Usage : test_infiniband_config.yml
+ib_inventory_name: "infiniband_inventory"
+template_value: 'infiniband_template'
+ib_host_name: 100.96.28.140
+job_name: "test_infiniband_template"
+ib_playbook_path: "control_plane/infiniband.yml"
+success_message: "Execution Successful"
+fail_case: "Failed. please check input parameters and try again!"
+awx_script_path: "test_prepare.yml"
+ib_vars_dir: ../input_params/ib_vars.yml
+ib_vars_backup_dir: ib_vars_backup.yml
+inventory_dir: test_ib_inventory
+ib_mtu_path: test_ib_mtu.yml
+login_vars_path: "../input_params/login_vars.yml"
+login_vars_vault_path: "../input_params/.login_vault_key"
+tower_config_file_path: "../roles/webui_awx/files/.tower_cli.cfg"
+tower_vault_file_path: "../roles/webui_awx/files/.tower_vault_key"
+file_perm: '0644'
+
+# Usage: test_ib_mtu.yml
+username: admin
+password: admin
+var_check: '"MTU": "2048"'
+
+# Usage: test_infiniband_facts.yml
+fact_template_value: 'infiniband_template'
+fact_job_name: 'test_ib_fact_job'
+ib_facts_playbook_path: "control_plane/tools/ib_facts.yml"
+test_infiniband_vars_dir: "test_vars/test_infiniband_vars.yml"
+random_ip: 100.100.100.100
+invalid_username: 'invalid_username'
+
+# Usage: test_infiniband_facts.yml
+validation_port: 1/5
+port_num: 5
+search_line: "  ib 1/6:"
+line_to_add: '      - "mtu 2K"'
+time_to_pause: 2
+line_to_search: "^(.*)port 8"
+ib_success_msg: "successful"
+ib_fail_msg: "failed"

+ 70 - 0
control_plane/test/test_vars/test_inventory_vars.yml

@@ -0,0 +1,70 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Usage: test_inventory_validation.yml
+idrac_search_key: "Integrated Dell Remote Access Controller"
+ethernet_search_key: "Dell EMC Networking OS10-Enterprise"
+infiniband_search_key: "MLNX-OS"
+me4_powervault_search_key: "ME4"
+idrac_fail_msg: "iDRAC IP validation is failed"
+idrac_success_msg: "iDRAC IP validation is success"
+ethernet_fail_msg: "Ethernet IP validation is failed"
+ethernet_success_msg: "Ethernet IP validation is success"
+infiniband_fail_msg: "Infiniband IP validation is failed"
+infiniband_success_msg: "Infiniband IP validation is success"
+powervault_fail_msg: "Powervault IP validation is failed"
+powervault_success_msg: "Powervault IP validation is success"
+failed_msg: "Failed. Please check input parameters and try again!"
+
+# Usage: test_inventory.yml
+login_vars_path: "../input_params/login_vars.yml"
+login_vars_vault_path: "../input_params/.login_vault_key"
+tower_config_file_path: "../roles/webui_awx/files/.tower_cli.cfg"
+tower_vault_file_path: "../roles/webui_awx/files/.tower_vault_key"
+awx_namespace: "awx"
+awx_pod_regex: 'awx-([A-Za-z0-9]{10})-([A-Za-z0-9]{5})'
+awx_pod_item_regex: "awx-([A-Za-z0-9]{10})-([A-Za-z0-9]{5})"
+ethernet_switch_support: true
+ib_switch_support: true
+powervault_support: true
+idrac_inventory_name: "idrac_inventory"
+ib_inventory_name: "infiniband_inventory"
+ethernet_inventory_name: "ethernet_inventory"
+pv_inventory_name: "powervault_me4_inventory"
+node_inventory_name: "node_inventory"
+validation_script_path: "test_inventory_validation.yml" 
+infiniband_status_code: 302
+org_description: "Name of organization using this product"
+manager_group: "manager" 
+compute_group: "compute" 
+login_group: "login" 
+nfs_group: "nfs" 
+group_fail_msg: "Group verification is failed"
+group_success_msg: " Group verification is successful"
+awx_fail_msg: "awx-operator containers creation failed"
+awx_success_msg: "awx-operator containers creation is successful"
+empty_host_err: "No hosts available"
+node_fail_msg: "Updation of new node is failed"
+node_success_msg: "Updation of new node is successful"
+inventory_fail_msg: "Inventory creation is failed"
+inventory_success_msg: "Inventory creation is successful"
+job_template_fail_msg: "Template creation is failed"
+job_template_success_msg: "Template creation is successful"
+project_fail_msg: " Project creation is failed"
+project_success_msg: "Project creation is successful"
+organization_fail_msg: "Organization is not created"
+organization_success_msg: "Organization is created"
+schedule_fail_msg: "Schedules are not created"
+schedule_success_msg: "Schedules are created"

+ 4 - 1
control_plane/test/test_vars/test_powervault_vars.yml

@@ -36,6 +36,7 @@ user_locale: "English"
 pool_a: "A"
 pool_b: "B"
 pool_type: "Virtual"
+linear_pool_type: "Linear"
 raid1_level: "RAID1"
 raid5_level: "RAID5"
 raid6_level: "RAID6"
@@ -43,6 +44,7 @@ raid10_level: "RAID10"
 raid_adapt_level: "ADAPT"
 disk_type_ssdsas: "SSD SAS"
 disk_type_sasmdl: "SAS MDL"
+disk_group_name_omnia: "omnia"
 disk_group_name_omnia_dgA01: "omnia_dgA01"
 disk_group_name_omnia_dgA02: "omnia_dgA02"
 disk_group_name_omnia_dgB01: "omnia_dgB01"
@@ -52,6 +54,7 @@ volume_name_k8s_v2: "k8s_V2"
 volume_name_slurm_v2: "slurm_V2"
 volume_size_input_100gb: "100GB"
 volume_size_100gb: "99.9GB"
+disk_range_1_2: "0.1-2"
 disk_range_5_6: "0.5-6"
 disk_range_3_4: "0.3-4"
 disk_range_7_10: "0.7-10"
@@ -72,7 +75,7 @@ login_vars_path: "../input_params/login_vars.yml"
 login_vars_vault_path: "../input_params/.login_vault_key"
 powervault_me4_var_path: "../input_params/powervault_me4_vars.yml"
 base_var_path: "../input_params/base_vars.yml"
-powervault_inventory_name: "powervault_me4_inventory"
+powervault_inventory_name: "test_powervault_me4_inventory"
 template_value: "powervault_me4_template"
 job_name: "test_powervault_me4_template"
 powervault_playbook_path: "control_plane/powervault_me4.yml"

+ 3 - 3
control_plane/tools/provision_report.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@
 - name: Fetch provision_password
   hosts: localhost
   connection: local
-  gather_facts: no
+  gather_facts: false
   roles:
     - fetch_password
 
@@ -50,7 +50,7 @@
 
 - name: Find reachable hosts using ssh
   hosts: reachable
-  gather_facts: False
+  gather_facts: false
   ignore_unreachable: true
   remote_user: "root"
   vars:

+ 7 - 2
control_plane/tools/roles/fetch_password/tasks/main.yml

@@ -1,4 +1,4 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -42,4 +42,9 @@
   command: >-
     ansible-vault encrypt {{ role_path }}/../../../{{ login_vars_filename }}
     --vault-password-file {{ role_path }}/../../../{{ vault_filename }}
-  changed_when: false
+  changed_when: false
+
+- name: Update login_vars.yml permission
+  file:
+    path: "{{ role_path }}/../../../{{ login_vars_filename }}"
+    mode: "{{ file_perm }}"

+ 9 - 4
control_plane/tools/roles/hpc_cluster_report/tasks/main.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -42,9 +42,14 @@
   file:
     path: "{{ role_path}}/files"
     state: directory
-    
+
+- name: Fetch cobbler pod name
+  command: kubectl get pods -n cobbler -o jsonpath="{.items[0].metadata.name}"
+  register: cobbler_pod 
+  changed_when: false
+
 - name: Copy dhcpd.leases from cobbler
-  command: docker cp cobbler:/var/lib/dhcpd/dhcpd.leases {{ role_path}}/files/dhcpd.leases
+  command: kubectl cp {{ cobbler_pod.stdout }}:/var/lib/dhcpd/dhcpd.leases {{ role_path}}/files/dhcpd.leases -n cobbler
   changed_when: true
 
 - name: Fetch ethernet details of unreachable hosts
@@ -88,4 +93,4 @@
 
 - name: Display provision host report
   debug:
-    var: host_report.stdout_lines
+    var: host_report.stdout_lines

+ 8 - 14
control_plane/tools/roles/hpc_cluster_report/templates/provision_host_report.j2

@@ -1,12 +1,10 @@
 HPC Cluster
 -----------
 Reachable Hosts:
-{% if reachable_host_number > 0 %}
+{% if reachable_host_number | int > 0 %}
 {% for host in groups['reachable_ssh'] %}
-{% if reachable_host_number == 1 %}
-  inet={{ host }}, link/ether={{ ethernet_detail_reachable.stdout | replace(';','')}}
-{% elif reachable_host_number > 1 %}
-{% if ethernet_detail_reachable.results[loop.index|int - 1].stdout | length > 1 %}
+{% if reachable_host_number | int >= 1 %}
+{% if ethernet_detail_reachable.results[loop.index|int - 1].stdout | length | int > 1 %}
   inet={{ host }}, link/ether={{ ethernet_detail_reachable.results[loop.index|int - 1].stdout | replace(';','')}}
 {% else %}
   inet={{ host }}, link/ether=Refer to mapping file provided
@@ -17,22 +15,18 @@ Reachable Hosts:
 Total reachable hosts: {{ reachable_host_number }}
 
 Unreachable Hosts:
-{% if unreachable_ping_host_number > 0 %}
+{% if unreachable_ping_host_number | int > 0 %}
 {% for host in groups['ungrouped'] %}
-{% if unreachable_ping_host_number == 1 %}
-  inet={{ host }}, link/ether={{ ethernet_detail_unreachable_ping.stdout | replace(';','')}}
-{% elif unreachable_ping_host_number > 1 %}
+{% if unreachable_ping_host_number | int >=  1 %}
   inet={{ host }}, link/ether={{ ethernet_detail_unreachable_ping.results[loop.index|int - 1].stdout | replace(';','')}}
 {% endif %}
 {% endfor %}
 {% endif %}
-{% if unreachable_ssh_host_number > 0 %}
+{% if unreachable_ssh_host_number | int  > 0 %}
 {% for host in groups['unreachable_ssh'] %}
-{% if unreachable_ssh_host_number == 1 %}
-  inet={{ host }}, link/ether={{ ethernet_detail_unreachable_ssh.stdout | replace(';','')}}
-{% elif unreachable_ssh_host_number > 1 %}
+{% if unreachable_ssh_host_number | int >= 1 %}
   inet={{ host }}, link/ether={{ ethernet_detail_unreachable_ssh.results[loop.index|int - 1].stdout | replace(';','')}}
 {% endif %}
 {% endfor %}
 {% endif %}
-Total unreachable hosts: {{ unreachable_host_number }}
+Total unreachable hosts: {{ unreachable_host_number }}

+ 1 - 1
control_plane/tools/roles/idrac_2fa/tasks/configure_smtp.yml

@@ -35,7 +35,7 @@
     manager_attributes:
       RemoteHosts.1.SMTPServerIPAddress: "{{ smtp_server_ip }}"
       RemoteHosts.1.SMTPPort: 25
-      RemoteHosts.1.SMTPAuthentication: "Enabled"
+      RemoteHosts.1.SMTPAuthentication: "{{ smtp_authentication }}"
       RemoteHosts.1.SMTPUserName: "{{ smtp_username }}"
       RemoteHosts.1.SMTPPassword: "{{ smtp_password }}"
       EmailAlert.1.Address: "{{ use_email_address_2fa }}"

+ 9 - 3
control_plane/tools/roles/idrac_2fa/tasks/validate_2fa_vars.yml

@@ -76,10 +76,16 @@
           ipv4_static_dns1 | length < 1 or
           ipv4_static_dns2 | length < 1 or
           smtp_server_ip | length < 1 or
-          smtp_username | length < 1 or
-          smtp_password | length < 1 or
           use_email_address_2fa | length < 1 
 
+    - name: Validate SMTP parameters if smtp_authentication is enabled
+      fail:
+        msg: "{{ smtp_input_fail_msg }} when smtp_authentication is enabled"
+      when:
+        - smtp_authentication | lower == "enabled"
+        - smtp_username | length < 1 or
+          smtp_password | length < 1
+
     - name: Assert use_email_address_2fa value
       assert:
         that: '"@" in use_email_address_2fa'
@@ -93,4 +99,4 @@
       changed_when: false
       run_once: true
       when: "'$ANSIBLE_VAULT;' in config_content.stdout"
-  when: two_factor_authentication == "enabled"
+  when: two_factor_authentication == "enabled"

+ 64 - 14
omnia.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,10 +21,10 @@
     - cluster_validation
 
 - name: Gather facts from all the nodes
-  hosts: all
+  hosts: manager, compute, login_node, nfs_node
 
 - name: Apply common installation and config
-  hosts: manager, compute
+  hosts: manager, compute, login_node
   gather_facts: false
   roles:
     - common
@@ -72,23 +72,73 @@
     - k8s_firewalld
   tags: kubernetes
 
+- name: Powervault Server Configuration
+  hosts: nfs_node
+  gather_facts: false
+  tasks:
+    - name: Configuring NFS node
+      include_role:
+        name: powervault_me4_nfs
+      when: hostvars['127.0.0.1']['powervault_status']
+
+- name: Map volume
+  hosts: powervault_me4
+  connection: local
+  gather_facts: false
+  tasks:
+    - name: Include map volume task
+      include_tasks: "{{ playbook_dir }}/control_plane/roles/powervault_me4/tasks/ports.yml"
+      when: hostvars['127.0.0.1']['powervault_status']
+
+    - name: Include map volume task
+      include_tasks: "{{ playbook_dir }}/control_plane/roles/powervault_me4/tasks/map_volume.yml"
+      when: hostvars['127.0.0.1']['powervault_status']
+
+- name: Apply NFS server setup on NFS node
+  hosts: nfs_node
+  gather_facts: false
+  tasks:
+    - name: Check the mapped volume on server
+      include_role:
+        name: powervault_me4_nfs
+        tasks_from: nfs_volume
+      when: hostvars['127.0.0.1']['powervault_status']
+
+    - name: Mount partitions
+      include_role:
+         name: powervault_me4_nfs
+         tasks_from: mount_me4_partitions
+      when: hostvars['127.0.0.1']['powervault_status']
+
+    - name: Setup NFS server on the partitions
+      include_role:
+         name: powervault_me4_nfs
+         tasks_from: me4_nfs_server_setup
+      when: hostvars['127.0.0.1']['powervault_status']
+
 - name: Apply NFS server setup on manager node
   hosts: manager
   gather_facts: false
-  roles:
-    - k8s_nfs_server_setup
-  tags:
-    - kubernetes
-    - nfs
+  tasks:
+    - name: Apply NFS server setup on manager node
+      include_role:
+        name: k8s_nfs_server_setup
+      when: not hostvars['127.0.0.1']['powervault_status']
+      tags:
+        - kubernetes
+        - nfs
 
 - name: Apply NFS client setup on compute nodes
   hosts: compute
   gather_facts: false
-  roles:
-    - k8s_nfs_client_setup
-  tags:
-    - kubernetes
-    - nfs
+  tasks:
+    - name: Apply NFS client setup on compute nodes
+      include_role:
+        name: k8s_nfs_client_setup
+      when: not hostvars['127.0.0.1']['powervault_status']
+      tags:
+        - kubernetes
+        - nfs
 
 - name: Start K8s on manager server
   hosts: manager
@@ -156,4 +206,4 @@
 
 - name: Passwordless SSH between manager and compute nodes
   include: control_plane/tools/passwordless_ssh.yml
-  when: hostvars['127.0.0.1']['appliance_status']
+  when: hostvars['127.0.0.1']['control_plane_status']

+ 4 - 2
roles/k8s_nfs_client_setup/tasks/main.yml

@@ -21,12 +21,14 @@
   tags: nfs_client
 
 - name: Check mounted share
-  shell: mount | grep nfs
+  shell: >
+    set -o pipefail && \
+    mount | grep nfs
   changed_when: false
   args:
     warn: false
   register: mounted_share
-  ignore_errors: True
+  failed_when: false
   tags: nfs_client
 
 - name: Creating directory to mount NFS Share

+ 1 - 1
roles/k8s_nfs_client_setup/vars/main.yml

@@ -19,6 +19,6 @@ nfs_share_dir: /home/k8snfs
 
 mounthost: "{{ groups['manager'][0] }}"
 
-nfs_mnt_dir_mode: 0755
+nfs_mnt_dir_mode: 0777
 
 fstab_file_path: /etc/fstab

+ 14 - 4
roles/k8s_start_services/tasks/deploy_k8s_services.yml

@@ -108,10 +108,20 @@
   changed_when: true
   tags: init
 
-- name: Start NFS Client Provisioner
-  command: "helm install stable/nfs-client-provisioner --set nfs.server='{{ nfs_server }}' --set nfs.path='{{ nfs_path }}' --generate-name"
+- name: Start NFS Client Provisioner using NFS on manager node
+  command: "helm install stable/nfs-client-provisioner --set nfs.server='{{ nfs_server_manager_node }}' --set nfs.path='{{ nfs_share_dir }}' --generate-name"
   changed_when: true
-  when: "'nfs-client-provisioner' not in k8s_pods.stdout"
+  when:
+    - "'nfs-client-provisioner' not in k8s_pods.stdout"
+    - not hostvars['127.0.0.1']['powervault_status']
+  tags: init
+
+- name: Start NFS Client Provisioner using NFS on NFS Node
+  command: "helm install stable/nfs-client-provisioner --set nfs.server='{{ nfs_server_nfs_node }}' --set nfs.path='{{ me4_nfs_share_k8s }}' --generate-name"
+  changed_when: true
+  when:
+    - "'nfs-client-provisioner' not in k8s_pods.stdout"
+    - hostvars['127.0.0.1']['powervault_status']
   tags: init
 
 - name: Set NFS-Client Provisioner as DEFAULT StorageClass
@@ -217,4 +227,4 @@
   command: helm install my-release spark-operator/spark-operator --set image.tag={{ operator_image_tag }} --namespace spark-operator --create-namespace
   changed_when: true
   when: "'spark-operator' not in k8s_pods.stdout"
-  tags: init
+  tags: init

+ 3 - 3
roles/k8s_start_services/vars/main.yml

@@ -61,9 +61,9 @@ k8s_dashboard_yaml_url: https://raw.githubusercontent.com/kubernetes/dashboard/v
 
 helm_stable_repo_url: https://charts.helm.sh/stable
 
-nfs_server: "{{ ansible_host }}"
+nfs_server_manager_node: "{{ ansible_host }}"
 
-nfs_path: /home/k8snfs
+nfs_server_nfs_node: "{{ groups['nfs_node'][0] }}"
 
 mpi_operator_yaml_url: https://raw.githubusercontent.com/kubeflow/mpi-operator/master/deploy/v1alpha2/mpi-operator.yaml
 
@@ -93,4 +93,4 @@ spark_operator_repo: https://googlecloudplatform.github.io/spark-on-k8s-operator
 
 operator_image_tag: v1beta2-1.2.3-3.1.1
 
-volcano_scheduling_yaml_url: https://raw.githubusercontent.com/volcano-sh/volcano/v1.3.0/installer/volcano-development.yaml
+volcano_scheduling_yaml_url: https://raw.githubusercontent.com/volcano-sh/volcano/v1.3.0/installer/volcano-development.yaml

+ 21 - 0
roles/powervault_me4_nfs/tasks/main.yml

@@ -0,0 +1,21 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Include variable file for powervault
+  include_vars: "{{ pv_nfs_file }}"
+  no_log: true
+
+- name: Configure the server
+  include_tasks: nfs_node_configure.yml

+ 79 - 0
roles/powervault_me4_nfs/tasks/me4_nfs_server_setup.yml

@@ -0,0 +1,79 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Install nfs-utils
+  package:
+    name: nfs-utils
+    state: present
+
+- name: Install firewalld
+  package:
+    name: firewalld
+    state: present
+
+- name: Start and enable firewalld
+  service:
+    name: firewalld
+    state: started
+    enabled: yes
+
+- name: Start and enable rpcbind and nfs-server service
+  service:
+    name: "{{ item }}"
+    state: restarted
+    enabled: yes
+  with_items:
+    - rpcbind
+    - nfs-server
+
+- name: Adding K8s NFS share entries in /etc/exports
+  lineinfile:
+    path: "{{ exports_file_path }}"
+    line: "{{ me4_nfs_share_k8s }} {{ item }}(rw,sync,no_root_squash)"
+  with_items:
+    - "{{ groups['manager'] }}"
+    - "{{ groups['compute'] }}"
+
+- name: Adding K8s NFS share entries in /etc/exports
+  lineinfile:
+    path: "{{ exports_file_path }}"
+    line: "{{ me4_nfs_share_k8s }} {{ item }}(rw,sync,no_root_squash)"
+  with_items:
+    - "{{ groups['manager'] }}"
+    - "{{ groups['compute'] }}"
+
+- name: Adding Slurm NFS share entries in /etc/exports
+  lineinfile:
+    path: "{{ exports_file_path }}"
+    line: "{{ me4_nfs_share_slurm }} {{ item }}(rw,sync,no_root_squash)"
+  with_items:
+    - "{{ groups['manager'] }}"
+    - "{{ groups['compute'] }}"
+
+- name: Exporting the shared directories
+  command: exportfs -ra
+  changed_when: true
+
+- name: Configuring firewall
+  firewalld:
+    service: "{{ item }}"
+    permanent: true
+    state: enabled
+  with_items:
+    - "{{ nfs_services }}"
+
+- name: Reload firewalld
+  command: firewall-cmd --reload
+  changed_when: true

+ 111 - 0
roles/powervault_me4_nfs/tasks/mount_me4_partitions.yml

@@ -0,0 +1,111 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Get ME4 volume
+  shell: >
+    set -o pipefail && \
+    lsscsi -s | grep ME4
+  changed_when: false
+  register: me4_output
+  failed_when: false
+
+- name: ME4 volume check
+  fail:
+    msg: "{{ me4_volume_fail_msg }}"
+  when: me4_output is failed or (me4_output.stdout | regex_findall('ME4') | length) != 2
+
+- name: Set ME4 data facts
+  set_fact:
+    me4_k8s_volume_data: "{{ me4_output.stdout.split('\n')[0].split(' ') | select() }}"
+    me4_slurm_volume_data: "{{ me4_output.stdout.split('\n')[1].split(' ') | select() }}"
+
+- name: Add ME4 volume data to dummy host
+  add_host:
+    name:   "NFS_NODE_TOKEN_HOLDER"
+    me4_k8s_volume: "{{ me4_k8s_volume_data[-2] }}"
+    me4_slurm_volume: "{{ me4_slurm_volume_data[-2] }}"
+
+- name: Get all mounted partitions
+  command: df -h
+  changed_when: false
+  register: mounted_partitions
+
+- name: Create partition on ME4 volumes
+  command: "parted -a optimal {{ item }} --script -- mklabel gpt mkpart primary 0% {{ powervault_me4_disk_partition_size }}"
+  changed_when: true
+  with_items:
+    - "{{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_k8s_volume'] }}"
+    - "{{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_slurm_volume'] }}"
+  when:
+    - hostvars['NFS_NODE_TOKEN_HOLDER']['me4_k8s_volume'] not in mounted_partitions.stdout
+    - hostvars['NFS_NODE_TOKEN_HOLDER']['me4_slurm_volume'] not in mounted_partitions.stdout
+
+- name: Update kernel with new partition changes
+  command: partprobe
+  changed_when: false
+
+- name: Check ME4 mounted partitions
+  shell: >
+    set -o pipefail && \
+    mount | grep me4
+  failed_when: false
+  changed_when: false
+  args:
+    warn: false
+  register: me4_mounted_partitions
+
+- name: Set file system on partition
+  shell: >
+    set -o pipefail && \
+    echo y | mkfs -t ext4 {{ item }}1
+  with_items:
+    - "{{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_k8s_volume'] }}"
+    - "{{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_slurm_volume'] }}"
+  when:
+    - me4_nfs_share_k8s not in me4_mounted_partitions.stdout
+    - me4_nfs_share_slurm not in me4_mounted_partitions.stdout
+
+- name: Creating NFS share directories
+  file:
+    path: "{{ item }}"
+    state: directory
+    mode: "{{ nfs_share_dir_mode }}"
+  with_items:
+    - "{{ me4_nfs_share_k8s }}"
+    - "{{ me4_nfs_share_slurm }}"
+
+- name: Mount K8s partition on K8s NFS share
+  command: "mount {{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_k8s_volume'] }}1 {{ me4_nfs_share_k8s }}"
+  changed_when: true
+  args:
+    warn: false
+  when: me4_nfs_share_k8s not in me4_mounted_partitions.stdout
+
+- name: Mount Slurm partition on Slurm NFS share
+  command: "mount {{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_slurm_volume'] }}1 {{ me4_nfs_share_slurm }}"
+  changed_when: true
+  args:
+    warn: false
+  when: me4_nfs_share_slurm not in me4_mounted_partitions.stdout
+
+- name: Configuring auto mount K8s partition on reboot
+  lineinfile:
+    path: "{{ fstab_file_path }}"
+    line: "{{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_k8s_volume'] }}1            {{ me4_nfs_share_k8s }}      ext4            defaults        0     0"
+
+- name: Configuring auto mount Slurm partition on reboot
+  lineinfile:
+    path: "{{ fstab_file_path }}"
+    line: "{{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_slurm_volume'] }}1            {{ me4_nfs_share_slurm }}      ext4            defaults        0     0"

+ 137 - 0
roles/powervault_me4_nfs/tasks/nfs_node_configure.yml

@@ -0,0 +1,137 @@
+
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Include
+  include_tasks: "{{ playbook_dir }}/control_plane/roles/powervault_me4/tasks/ports.yml"
+
+- name: Refresh ssh keys
+  command: ssh-keygen -R {{ groups['powervault_me4'][0] }}
+  changed_when: false
+  tags: install
+  failed_when: false
+
+- name: Validate authentication of username and password
+  command: ping -c1 {{ groups['powervault_me4'][0] }}
+  register: validate_login
+  changed_when: false
+  failed_when: false
+
+- name: Install packages
+  package:
+    name: iscsi-initiator-utils
+    state: present
+  tags: install
+
+- name: Install packages
+  package:
+    name: sg3_utils
+    state: present
+  tags: install
+
+- name: Set bootproto value
+  lineinfile:
+    path: "{{ nic_path }}"
+    regexp: '^BOOTPROTO='
+    line: 'BOOTPROTO=none'
+  register: result
+
+- name: Set onboot value
+  lineinfile:
+    path: "{{ nic_path }}"
+    regexp: '^ONBOOT='
+    line: 'ONBOOT=yes'
+
+- name: Add ip address
+  lineinfile:
+    path: "{{ nic_path }}"
+    insertafter: '^ONBOOT=yes'
+    line: 'IPADDR={{ pv_nic_ip }}'
+
+- name: Add netmask address
+  lineinfile:
+    path: "{{ nic_path }}"
+    insertafter: '^IPADDR={{ pv_nic_ip }}'
+    line: NETMASK=255.255.255.0
+
+- name: Down the nic
+  command: ifdown {{ pv_nic }}
+  changed_when: true
+  failed_when: false
+  tags: install
+
+- name: Up the nic
+  command: ifup {{ pv_nic }}
+  changed_when: true
+  tags: install
+
+- name: Show ip
+  shell: >
+    set -o pipefail && \
+    ifconfig {{ pv_nic }} | grep 'inet' |cut -d: -f2 |  awk '{ print $2}'
+  changed_when: false
+
+- name: Discover nodes
+  command: iscsiadm -m discovery -t sendtargets -p {{ item }}
+  with_items: "{{ set_port_ip }}"
+  register: ports_available
+  failed_when: false
+  changed_when: false
+  tags: install
+
+- name: Pv port ip
+  add_host:
+    name: pv
+    map_ip: "{{ item.item }}"
+  with_items: "{{ ports_available.results }}"
+  when: item.rc == 0
+
+- name: Pv port ip
+  set_fact:
+    map_ip_output: "{{ item.stdout_lines }}"
+  with_items: "{{ ports_available.results }}"
+  when: item.rc == 0
+
+- name: Find feasible port ip
+  set_fact:
+    discover: "{{ item }}"
+  with_items: "{{ map_ip_output }}"
+  when: hostvars['pv']['map_ip'] in item
+
+- name: Split on comma
+  set_fact:
+    ip_port: "{{ discover.split(',')[0] }}"
+
+- name: Pv name
+  set_fact:
+    pv_name: "{{ discover.split(',')[1].split()[1] }}"
+
+- name: IQDN id
+  shell: >
+    set -o pipefail && \
+    cat /etc/iscsi/initiatorname.iscsi | cut -f2 -d"="
+  register: iqdn_id
+  changed_when: false
+  tags: install
+
+- name: Add ME4 volume data to dummy host
+  add_host:
+    name:   "server_iqdn_id"
+    server_iqdn: "{{ iqdn_id.stdout }}"
+
+- name: Login to the powervault
+  command: iscsiadm -m node --login {{ pv_name }} -p {{ ip_port }}
+  changed_when: true
+  tags: install

+ 40 - 0
roles/powervault_me4_nfs/tasks/nfs_volume.yml

@@ -0,0 +1,40 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Refresh ssh keys
+  command: ssh-keygen -R {{ groups['powervault_me4'][0] }}
+  changed_when: false
+  tags: install
+  failed_when: false
+
+- name: Validate authentication of username and password
+  command: ping -c1 {{ groups['powervault_me4'][0] }}
+  register: validate_login
+  changed_when: false
+  failed_when: false
+
+- name: Scan for getting the volume
+  command: rescan-scsi-bus.sh --forcerescan
+  changed_when: false
+  register: volume_pv
+  tags: install
+
+- name: Assert if volume created or not
+  assert:
+    that:
+      - "' Model: ME4' in volume_pv.stdout"
+    success_msg: "Volume is created"
+    fail_msg: "Volume is not created properly."
+  tags: install

+ 38 - 0
roles/powervault_me4_nfs/vars/main.yml

@@ -0,0 +1,38 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# vars file for powervault_me4_nfs
+
+# Usage: mount_me4_partitions.yml
+me4_volume_fail_msg: "ME4 volumes not found!"
+me4_nfs_share_k8s: "/me4_k8s_nfs"
+me4_nfs_share_slurm: "/me4_slurm_nfs"
+fstab_file_path: "/etc/fstab"
+
+# Usage: me4_nfs_server_setup.yml
+exports_file_path: /etc/exports
+nfs_services:
+  - mountd
+  - rpc-bind
+  - nfs
+nfs_share_dir_mode: 0777
+
+# Usage: nfs_node_configure.yml
+pv_nic: "{{ powervault_me4_server_nic }}"
+pv_nic_ip: 192.168.25.3
+pv_nic_gateway: 192.168.25.1
+pv_port_ip: 192.168.25.5
+pv_nfs_file: "{{ role_path }}/../../control_plane/input_params/powervault_me4_vars.yml"
+nic_path: "/etc/sysconfig/network-scripts/ifcfg-{{ powervault_me4_server_nic }}"