
Merge branch 'devel' into devel

Lucas A. Wilson 4 years ago
parent commit b9800628c0

+ 2 - 2
appliance/appliance_config.yml

@@ -14,12 +14,12 @@
 ---
 
 # Password used while deploying OS on bare metal servers and for Cobbler UI.
-# The Length of the password should be atleast 8.
+# The Length of the password should be at least 8.
 # The password must not contain -,\, ',"
 provision_password: ""
 
 # Password used for the AWX UI.
-# The Length of the password should be atleast 8.
+# The Length of the password should be at least 8.
 # The password must not contain -,\, ',"
 awx_password: ""
 

+ 4 - 0
appliance/roles/common/tasks/docker_installation.yml

@@ -55,6 +55,10 @@
     state: present
   tags: install
 
+- name: Update pip
+  command: pip3 install --upgrade pip
+  changed_when: false
+
 - name: Installation using python3
   pip:
     name: "{{ docker_compose }}"
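
Note: the new "Update pip" task shells out to pip3 with changed_when: false. A minimal alternative sketch using Ansible's built-in pip module, which reports idempotent state natively (shown for comparison only, not part of this change):

- name: Update pip
  pip:
    name: pip            # upgrade pip itself
    state: latest
    executable: pip3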

+ 13 - 13
appliance/roles/common/vars/main.yml

@@ -41,11 +41,11 @@ hostname: github.com
 port_no: 22
 os_name: CentOS
 os_version: '7.9' 
-internet_status: "Failed: No Internet connection.Connect to Internet."
-os_status: "Unsupported OS or OS version.OS must be {{ os_name }} and Version must be {{ os_version }} or more"
+internet_status: "Failed. No Internet connection. Make sure network is up."
+os_status: "Unsupported OS or OS version. OS should be {{ os_name }} and Version should be {{ os_version }} or more"
 selinux_status: "SElinux is not disabled. Disable it in /etc/sysconfig/selinux and reboot the system"
 iso_name: CentOS-7-x86_64-Minimal-2009.iso
-iso_fail: "Iso file absent: Download and copy the iso file in omnia/appliance/roles/provision/files"
+iso_fail: "Iso file not found. Download and copy the iso file to omnia/appliance/roles/provision/files"
 
 # Usage: docker_installation.yml
 docker_repo_url: https://download.docker.com/linux/centos/docker-ce.repo
@@ -61,28 +61,28 @@ docker_volume_name: omnia-storage
 
 # Usage: password_config.yml
 input_config_filename: "appliance_config.yml"
-fail_msg_provision_password: "Failed. Incorrect provision_password format provided in input_config.yml file"
+fail_msg_provision_password: "Failed. Incorrect provision_password format provided in appliance_config.yml file"
 success_msg_provision_password: "provision_password validated"
-fail_msg_awx_password: "Failed. Incorrect awx_password format provided in input_config.yml file"
+fail_msg_awx_password: "Failed. Incorrect awx_password format provided in appliance_config.yml file"
 success_msg_awx_password: "awx_password validated"
-fail_msg_hpc_nic: "Failed. Incorrect hpc_nic format provided in input_config.yml file"
+fail_msg_hpc_nic: "Failed. Incorrect hpc_nic format provided in appliance_config.yml file"
 success_msg_hpc_nic: "hpc_nic validated"
-fail_msg_public_nic: "Failed. Incorrect public_nic format provided in input_config.yml file"
+fail_msg_public_nic: "Failed. Incorrect public_nic format provided in appliance_config.yml file"
 success_msg_public_nic: "public_nic validated"
 success_mapping_file: "mapping_file_exists validated"
-fail_mapping_file: "Failed. Incorrect mapping_file_exists value in input_config.yml. It should be either true or false"
-input_config_failure_msg: "Please provide all the required parameters in input_config.yml"
+fail_mapping_file: "Failed. Incorrect mapping_file_exists value in appliance_config.yml. It should be either true or false"
+input_config_failure_msg: "Please provide all the required parameters in appliance_config.yml"
 success_dhcp_range: "Dhcp_range validated"
-fail_dhcp_range: "Failed: Incorrect range assigned for dhcp"
+fail_dhcp_range: "Failed. Incorrect range assigned for dhcp"
 success_hpc_ip: "IP validated"
-fail_hpc_ip: "Failed: Nic should be configured"
+fail_hpc_ip: "Failed. Nic should be configured"
 min_length: 8
 max_length: 30
 nic_min_length: 3
 vault_filename: .vault_key
 config_filename: "omnia_config.yml"
 config_vaultname: .omnia_vault_key
-fail_msg_mariadb_password: "maria_db password not given in correct format."
+fail_msg_mariadb_password: "Failed. Incorrect mariadb_password format provided in omnia_config.yml file"
 success_msg_mariadb_password: "mariadb_password validated"
 success_msg_k8s_cni: "Kubernetes CNI Validated"
-fail_msg_k8s_cni: "Kubernetes CNI not correct."
+fail_msg_k8s_cni: "Failed. Kubernetes CNI is incorrect in omnia_config.yml"

+ 0 - 1
appliance/roles/provision/files/Dockerfile

@@ -39,7 +39,6 @@ COPY kickstart.yml /root
 COPY tftp.yml /root
 COPY inventory_creation.yml /root
 COPY centos7.ks /var/lib/cobbler/kickstarts
-COPY first-sync.sh /usr/local/bin/first-sync.sh
 
 EXPOSE 69 80 443 25151
 

+ 0 - 19
appliance/roles/provision/files/first-sync.sh

@@ -1,19 +0,0 @@
-
- timeout=30
-while ! netstat -laputen | grep -i listen | grep 25151 1>/dev/null 2>&1
-do
-  sleep 1
-  timeout=$((${timeout} - 1))
-  if [ ${timeout} -eq 0 ]
-  then
-    echo "ERROR: cobblerd is not running."
-    exit 1
-  fi
-done
-sleep 2
-echo "cobbler get-loaders"
-cobbler get-loaders
-echo "cobbler sync"
-cobbler sync
-echo "cobbler check"
-cobbler check
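
Note: the deleted script gated cobbler sync on cobblerd listening on port 25151. If that readiness check is still needed elsewhere, an equivalent sketch with Ansible's wait_for module (illustrative; the 30-second timeout is an assumption carried over from the script):

- name: Wait for cobblerd to listen on port 25151
  wait_for:
    port: 25151
    timeout: 30

- name: Run cobbler sync once cobblerd is up
  command: cobbler sync
  changed_when: false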

+ 16 - 2
appliance/roles/provision/files/tftp.yml

@@ -13,7 +13,7 @@
 #  limitations under the License.
 ---
 
-- name: Start tftp
+- name: Start tftp and dhcp
   hosts: localhost
   connection: local
   tasks:
@@ -29,4 +29,18 @@
       command: systemctl start tftp.service
       args:
         warn: no
-      when: "('inactive' in tftp_status.stdout) or ('unknown' in tftp_status.stdout)"
+      when: "('inactive' in tftp_status.stdout) or ('unknown' in tftp_status.stdout)"
+
+    - name: Fetch dhcp status
+      command: systemctl is-active dhcpd
+      args:
+        warn: no
+      register: dhcp_status
+      ignore_errors: yes
+      changed_when: false
+
+    - name: Start dhcp if inactive state
+      command: systemctl start dhcpd.service
+      args:
+        warn: no
+      when: "('inactive' in dhcp_status.stdout) or ('unknown' in dhcp_status.stdout)"
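
Note: the added fetch/start pair deliberately mirrors the existing tftp handling. A more compact form, if raw systemctl calls are not required here, would be the idempotent service module (a sketch, not part of this change):

- name: Ensure dhcpd is running
  service:
    name: dhcpd
    state: started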

+ 1 - 0
appliance/roles/provision/tasks/check_prerequisites.yml

@@ -61,6 +61,7 @@
   command: docker exec cobbler cobbler profile list
   changed_when: false
   register: cobbler_profile_list
+  ignore_errors: true
   when: cobbler_container_status == true
 
 - name: Check crontab list
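
Note: ignore_errors: true lets the play proceed but still prints the task as failed. If a non-zero exit simply means "no profiles yet", an alternative sketch is to fold the failure into the registered result and inspect cobbler_profile_list.rc downstream:

- name: Check cobbler profile list
  command: docker exec cobbler cobbler profile list
  changed_when: false
  failed_when: false   # treat a non-zero exit as data, not a failure
  register: cobbler_profile_list
  when: cobbler_container_status == true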

+ 1 - 1
appliance/roles/provision/tasks/provision_password.yml

@@ -50,7 +50,7 @@
   replace:
     path: "{{ role_path }}/files/centos7.ks"
     regexp: '^url --url http://ip/cblr/links/CentOS7-x86_64/'
-    replace: url --url http://{{ public_ip }}/cblr/links/CentOS7-x86_64/
+    replace: url --url http://{{ hpc_ip }}/cblr/links/CentOS7-x86_64/
   tags: install
 
 - name: Random phrase generation
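
For context, the kickstart source line is now rendered from hpc_ip instead of public_ip; with a hypothetical hpc_ip of 172.17.0.1, the rewritten line in centos7.ks would read:

url --url http://172.17.0.1/cblr/links/CentOS7-x86_64/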

+ 4 - 4
appliance/roles/provision/vars/main.yml

@@ -17,13 +17,13 @@
 
 #Usage: mapping_file.yml
 mapping_file_name: mapping_file.csv
-mapping_file_fail: "Mapping file absent: Copy the mapping file in omnia/appliance/roles/provision/files"
-fail_hostname_duplicate:  "Duplicate hostname exists. Please check"
+mapping_file_fail: "Mapping file not found. Copy the mapping_file.csv to omnia/appliance/roles/provision/files"
+fail_hostname_duplicate:  "Duplicate hostname exists. Please verify mapping file again."
 remove_header: awk 'NR > 1 { print }' {{ role_path }}/files/new_mapping_file.csv
 
 #Usage: check_prerequisite.yml
 iso_name: CentOS-7-x86_64-Minimal-2009.iso
-iso_fail: "Iso file absent: Download and copy the iso file in omnia/appliance/roles/provision/files"
+iso_fail: "Iso file not found. Download and copy the iso file to omnia/appliance/roles/provision/files"
 
 # Usage: provision_password.yml
 provision_encrypted_dest: ../files/
@@ -35,7 +35,7 @@ docker_image_tag: latest
 cobbler_run_command: docker run -itd --privileged --net=host --restart=always -v {{ mount_path }}:/root/omnia  -v cobbler_www:/var/www/cobbler:Z -v cobbler_backup:/var/lib/cobbler/backup:Z -v /mnt/iso:/mnt:Z -p 69:69/udp -p 81:80 -p 443:443 -p 25151:25151 --name cobbler  cobbler:latest  /sbin/init
 
 # Usage: main.yml
-message_skipped: "Installation Skipped: Cobbler instance is already running on your system"
+message_skipped: "Installation Skipped: Cobbler instance is already running in your system"
 message_installed: "Installation Successful"
 
 # Usage: mount_iso.yml

+ 14 - 11
appliance/test/input_config_empty.yml

@@ -14,26 +14,29 @@
 ---
 
 # Password used while deploying OS on bare metal servers and for Cobbler UI.
-# The Length of the password should be more than 7.
+# The Length of the password should be at least 8.
 # The password must not contain -,\, ',"
 provision_password: ""
 
 # Password used for the AWX UI.
-# The Length of the password should be more than 7.
+# The Length of the password should be at least 8.
 # The password must not contain -,\, ',"
 awx_password: ""
 
-# Password used for Slurm database.
-# The Length of the password should be more than 7.
-# The password must not contain -,\, ',"
-mariadb_password: ""
-
 # The nic/ethernet card that needs to be connected to the HPC switch.
 # This nic will be configured by Omnia for the DHCP server.
 # Default value of nic is em1.
 hpc_nic: "em1"
 
-# The nic card that needs to be connected to the public internet.
-# The public_nic should be em2, em1 or em3
-# Default value of nic is em2.
-public_nic: "em2"
+# The nic/ethernet card that will be connected to the public internet.
+# Default value of nic is em2
+public_nic: "em2"
+
+# The mapping file consists of the MAC address and its respective IP address and hostname.
+# If user wants to provide a mapping file, set this value to "true"
+# The format of mapping file should be MAC,hostname,IP and must be a CSV file.
+mapping_file_exists: ""
+
+# The dhcp range for assigning the IP address to the baremetal nodes.
+dhcp_start_ip_range: ""
+dhcp_end_ip_range: ""
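
For reference, a mapping file in the MAC,hostname,IP format described above would look like the following (values are purely illustrative; the header row is what the remove_header command in the provision role strips):

MAC,Hostname,IP
aa:bb:cc:dd:ee:01,compute001,172.17.0.21
aa:bb:cc:dd:ee:02,compute002,172.17.0.22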

+ 15 - 12
appliance/test/input_config_test.yml

@@ -1,4 +1,4 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -14,26 +14,29 @@
 ---
 
 # Password used while deploying OS on bare metal servers and for Cobbler UI.
-# The Length of the password should be more than 7.
+# The Length of the password should be at least 8.
 # The password must not contain -,\, ',"
 provision_password: "omnia@123"
 
 # Password used for the AWX UI.
-# The Length of the password should be more than 7.
+# The Length of the password should be at least 8.
 # The password must not contain -,\, ',"
 awx_password: "omnia@123"
 
-# Password used for Slurm database.
-# The Length of the password should be more than 7.
-# The password must not contain -,\, ',"
-mariadb_password: "omnia@123"
-
 # The nic/ethernet card that needs to be connected to the HPC switch.
 # This nic will be configured by Omnia for the DHCP server.
 # Default value of nic is em1.
 hpc_nic: "em1"
 
-# The nic card that needs to be connected to the public internet.
-# The public_nic should be em2, em1 or em3
-# Default value of nic is em2.
-public_nic: "em2"
+# The nic/ethernet card that will be connected to the public internet.
+# Default value of nic is em2
+public_nic: "em2"
+
+# The mapping file consists of the MAC address and its respective IP address and hostname.
+# If user wants to provide a mapping file, set this value to "true"
+# The format of mapping file should be MAC,hostname,IP and must be a CSV file.
+mapping_file_exists: "false"
+
+# The dhcp range for assigning the IP address to the baremetal nodes.
+dhcp_start_ip_range: "172.17.0.20"
+dhcp_end_ip_range: "172.17.0.100"

+ 370 - 42
appliance/test/test_common.yml

@@ -361,7 +361,9 @@
         replace: "{{ item.replace }}"
       with_items:
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_008
 
     - block:
@@ -415,7 +417,9 @@
         replace: "{{ item.replace }}"
       with_items:
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_009
 
     - block:
@@ -440,7 +444,7 @@
       tags: TC_009
 
 # Testcase OMNIA_DIO_US_DVC_TC_010
-# Execute common role in management station with mariadb_password as empty
+# Execute common role in management station with mapping_file_exists as empty
 - name: OMNIA_DIO_US_DVC_TC_010
   hosts: localhost
   connection: local
@@ -470,6 +474,8 @@
       with_items:
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_010
 
     - block:
@@ -482,7 +488,7 @@
       rescue:
         - name: Validate error
           assert:
-            that: input_config_failure_msg in input_config_check.msg
+            that: fail_mapping_file in mapping_file_check.msg
             success_msg: "{{ input_config_check_success_msg }}"
             fail_msg: "{{ input_config_check_fail_msg }}"
       tags: TC_010
@@ -524,7 +530,9 @@
       with_items:
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ long_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_011
 
     - block:
@@ -579,7 +587,9 @@
       with_items:
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ white_space_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_012
 
     - block:
@@ -634,7 +644,9 @@
       with_items:
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ special_character_password1 }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_013
 
     - block:
@@ -689,7 +701,9 @@
       with_items:
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ special_character_password2 }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_014
 
     - block:
@@ -744,7 +758,9 @@
       with_items:
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ long_password }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_015
 
     - block:
@@ -799,7 +815,9 @@
       with_items:
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ white_space_password }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_016
 
     - block:
@@ -854,7 +872,9 @@
       with_items:
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ special_character_password1 }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_017
 
     - block:
@@ -909,7 +929,9 @@
       with_items:
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ special_character_password2 }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_018
 
     - block:
@@ -934,7 +956,7 @@
       tags: TC_018
 
 # Testcase OMNIA_DIO_US_DVC_TC_019
-# Execute common role in management station with mariadb_password of more than 31 characters
+# Execute common role in management station with mapping_file_exists status as true
 - name: OMNIA_DIO_US_DVC_TC_019
   hosts: localhost
   connection: local
@@ -962,9 +984,11 @@
         regexp: "{{ item.regexp }}"
         replace: "{{ item.replace }}"
       with_items:
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ long_password }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"true\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_019
 
     - block:
@@ -974,10 +998,10 @@
             tasks_from: "{{ password_config_file }}"
           vars:
             input_config_filename: "{{ new_input_config_filename }}"
-      rescue:
-        - name: Validate error
+      always:
+        - name: Validate success message
           assert:
-            that: fail_msg_mariadb_password in mariadb_password_check.msg
+            that: success_mapping_file in mapping_file_check.msg
             success_msg: "{{ input_config_check_success_msg }}"
             fail_msg: "{{ input_config_check_fail_msg }}"
       tags: TC_019
@@ -989,7 +1013,7 @@
       tags: TC_019
 
 # Testcase OMNIA_DIO_US_DVC_TC_020
-# Execute common role in management station with mariadb_password as string contains white spaces
+# Execute common role in management station with mapping_file_exists status other than true or false
 - name: OMNIA_DIO_US_DVC_TC_020
   hosts: localhost
   connection: local
@@ -1017,9 +1041,11 @@
         regexp: "{{ item.regexp }}"
         replace: "{{ item.replace }}"
       with_items:
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ white_space_password }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"test\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_020
 
     - block:
@@ -1029,10 +1055,10 @@
             tasks_from: "{{ password_config_file }}"
           vars:
             input_config_filename: "{{ new_input_config_filename }}"
-      always:
-        - name: Validate success message
+      rescue:
+        - name: Validate error
           assert:
-            that: success_msg_mariadb_password in mariadb_password_check.msg
+            that: fail_mapping_file in mapping_file_check.msg
             success_msg: "{{ input_config_check_success_msg }}"
             fail_msg: "{{ input_config_check_fail_msg }}"
       tags: TC_020
@@ -1044,7 +1070,7 @@
       tags: TC_020
 
 # Testcase OMNIA_DIO_US_DVC_TC_021
-# Execute common role in management station with mariadb_password as string contains characters like '/' and '-'
+# Execute common role in management station with dhcp_start_ip_range as empty
 - name: OMNIA_DIO_US_DVC_TC_021
   hosts: localhost
   connection: local
@@ -1072,9 +1098,10 @@
         regexp: "{{ item.regexp }}"
         replace: "{{ item.replace }}"
       with_items:
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ special_character_password1 }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_021
 
     - block:
@@ -1087,7 +1114,7 @@
       rescue:
         - name: Validate error
           assert:
-            that: fail_msg_mariadb_password in mariadb_password_check.msg
+            that: input_config_failure_msg in input_config_check.msg
             success_msg: "{{ input_config_check_success_msg }}"
             fail_msg: "{{ input_config_check_fail_msg }}"
       tags: TC_021
@@ -1099,7 +1126,7 @@
       tags: TC_021
 
 # Testcase OMNIA_DIO_US_DVC_TC_022
-# Execute common role in management station with mariadb_password as string contains special characters other than '-', '/'
+# Execute common role in management station with dhcp_end_ip_range as empty
 - name: OMNIA_DIO_US_DVC_TC_022
   hosts: localhost
   connection: local
@@ -1127,9 +1154,10 @@
         regexp: "{{ item.regexp }}"
         replace: "{{ item.replace }}"
       with_items:
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ special_character_password2 }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
       tags: TC_022
 
     - block:
@@ -1139,10 +1167,10 @@
             tasks_from: "{{ password_config_file }}"
           vars:
             input_config_filename: "{{ new_input_config_filename }}"
-      always:
-        - name: Validate success message
+      rescue:
+        - name: Validate error
           assert:
-            that: success_msg_mariadb_password in mariadb_password_check.msg
+            that: input_config_failure_msg in input_config_check.msg
             success_msg: "{{ input_config_check_success_msg }}"
             fail_msg: "{{ input_config_check_fail_msg }}"
       tags: TC_022
@@ -1154,7 +1182,7 @@
       tags: TC_022
 
 # Testcase OMNIA_DIO_US_DVC_TC_023
-# Execute common role in management station with mariadb_password of 30 characters
+# Execute common role in management station with mapping_file_exists status as false
 - name: OMNIA_DIO_US_DVC_TC_023
   hosts: localhost
   connection: local
@@ -1182,9 +1210,11 @@
         regexp: "{{ item.regexp }}"
         replace: "{{ item.replace }}"
       with_items:
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ max_length_password }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_023
 
     - block:
@@ -1197,7 +1227,7 @@
       always:
         - name: Validate success message
           assert:
-            that: success_msg_mariadb_password in mariadb_password_check.msg
+            that: success_mapping_file in mapping_file_check.msg
             success_msg: "{{ input_config_check_success_msg }}"
             fail_msg: "{{ input_config_check_fail_msg }}"
       tags: TC_023
@@ -1239,7 +1269,9 @@
       with_items:
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ max_length_password }}\"" }
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_024
 
     - block:
@@ -1294,7 +1326,9 @@
       with_items:
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ max_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
       tags: TC_025
 
     - block:
@@ -1349,7 +1383,9 @@
       with_items:
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
         - { regexp: "hpc_nic: \"em1\"", replace: "hpc_nic: \"em2\"" }
         - { regexp: "hpc_nic: \"em1\"", replace: "hpc_nic: \"em2\"" }
       tags: TC_026
       tags: TC_026
 
 
@@ -1405,7 +1441,9 @@
       with_items:
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
         - { regexp: "hpc_nic: \"em1\"", replace: "hpc_nic: \"\"" }
         - { regexp: "hpc_nic: \"em1\"", replace: "hpc_nic: \"\"" }
       tags: TC_027
       tags: TC_027
 
 
@@ -1461,7 +1499,9 @@
       with_items:
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
         - { regexp: "public_nic: \"em2\"", replace: "public_nic: \"\"" }
         - { regexp: "public_nic: \"em2\"", replace: "public_nic: \"\"" }
       tags: TC_028
       tags: TC_028
 
 
@@ -1487,7 +1527,7 @@
       tags: TC_028
 
 # Testcase OMNIA_DIO_US_DVC_TC_029
-# Execute common role in management station with public_nic other than em1, em2 or em3
+# Execute common role in management station with public_nic not present in device
 - name: OMNIA_DIO_US_DVC_TC_029
   hosts: localhost
   connection: local
@@ -1517,7 +1557,9 @@
       with_items:
         - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
         - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
-        - { regexp: "mariadb_password: \"\"", replace: "mariadb_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
         - { regexp: "public_nic: \"em2\"", replace: "public_nic: \"em5\"" }
         - { regexp: "public_nic: \"em2\"", replace: "public_nic: \"em5\"" }
       tags: TC_029
       tags: TC_029
 
 
@@ -1531,7 +1573,7 @@
       rescue:
         - name: Validate error
           assert:
-            that: fail_msg_public_nic in public_nic_check.msg
+            that: fail_hpc_ip in public_ip_check.msg
             success_msg: "{{ input_config_check_success_msg }}"
             fail_msg: "{{ input_config_check_fail_msg }}"
       tags: TC_029
@@ -1540,4 +1582,290 @@
       file:
         path: "{{ new_input_config_filename }}"
         state: absent
-      tags: TC_029
+      tags: TC_029
+
+# Testcase OMNIA_DIO_US_DVC_TC_030
+# Execute common role in management station with hpc_nic not present in device
+- name: OMNIA_DIO_US_DVC_TC_030
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_common_vars.yml
+    - ../roles/common/vars/main.yml
+  tasks:
+    - name: Delete "{{ new_input_config_filename }}"
+      file:
+        path: "{{ new_input_config_filename }}"
+        state: absent
+      tags: TC_030
+
+    - name: Copy "{{ empty_input_config_filename }}" to new file
+      copy:
+        src: "{{ empty_input_config_filename }}"
+        dest: "{{ new_input_config_filename }}"
+      tags: TC_030
+
+    - name: Edit "{{ new_input_config_filename }}"
+      replace:
+        path: "{{ new_input_config_filename }}"
+        regexp: "{{ item.regexp }}"
+        replace: "{{ item.replace }}"
+      with_items:
+        - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
+        - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
+        - { regexp: "hpc_nic: \"em1\"", replace: "hpc_nic: \"em5\"" }
+      tags: TC_030
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+            tasks_from: "{{ password_config_file }}"
+          vars:
+            input_config_filename: "{{ new_input_config_filename }}"
+      rescue:
+        - name: Validate error
+          assert:
+            that: fail_hpc_ip in hpc_ip_check.msg
+            success_msg: "{{ input_config_check_success_msg }}"
+            fail_msg: "{{ input_config_check_fail_msg }}"
+      tags: TC_030
+
+    - name: Delete "{{ new_input_config_filename }}"
+      file:
+        path: "{{ new_input_config_filename }}"
+        state: absent
+      tags: TC_030
+
+# Testcase OMNIA_DIO_US_DVC_TC_031
+# Execute common role with dhcp_start_ip_range in wrong ip range
+- name: OMNIA_DIO_US_DVC_TC_031
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_common_vars.yml
+    - ../roles/common/vars/main.yml
+  tasks:
+    - name: Delete "{{ new_input_config_filename }}"
+      file:
+        path: "{{ new_input_config_filename }}"
+        state: absent
+      tags: TC_031
+
+    - name: Copy "{{ empty_input_config_filename }}" to new file
+      copy:
+        src: "{{ empty_input_config_filename }}"
+        dest: "{{ new_input_config_filename }}"
+      tags: TC_031
+
+    - name: Edit "{{ new_input_config_filename }}"
+      replace:
+        path: "{{ new_input_config_filename }}"
+        regexp: "{{ item.regexp }}"
+        replace: "{{ item.replace }}"
+      with_items:
+        - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
+        - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ invalid_dhcp_ip }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
+      tags: TC_031
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+            tasks_from: "{{ password_config_file }}"
+          vars:
+            input_config_filename: "{{ new_input_config_filename }}"
+      rescue:
+        - name: Validate error
+          assert:
+            that: fail_dhcp_range in dhcp_start_ip_check.msg
+            success_msg: "{{ input_config_check_success_msg }}"
+            fail_msg: "{{ input_config_check_fail_msg }}"
+      tags: TC_031
+
+    - name: Delete "{{ new_input_config_filename }}"
+      file:
+        path: "{{ new_input_config_filename }}"
+        state: absent
+      tags: TC_031
+
+# Testcase OMNIA_DIO_US_DVC_TC_032
+# Execute common role with dhcp_start_ip_range in wrong format
+- name: OMNIA_DIO_US_DVC_TC_032
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_common_vars.yml
+    - ../roles/common/vars/main.yml
+  tasks:
+    - name: Delete "{{ new_input_config_filename }}"
+      file:
+        path: "{{ new_input_config_filename }}"
+        state: absent
+      tags: TC_032
+
+    - name: Copy "{{ empty_input_config_filename }}" to new file
+      copy:
+        src: "{{ empty_input_config_filename }}"
+        dest: "{{ new_input_config_filename }}"
+      tags: TC_032
+
+    - name: Edit "{{ new_input_config_filename }}"
+      replace:
+        path: "{{ new_input_config_filename }}"
+        regexp: "{{ item.regexp }}"
+        replace: "{{ item.replace }}"
+      with_items:
+        - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
+        - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ wrong_dhcp_ip }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ valid_dhcp_end_range }}\"" }
+      tags: TC_032
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+            tasks_from: "{{ password_config_file }}"
+          vars:
+            input_config_filename: "{{ new_input_config_filename }}"
+      rescue:
+        - name: Validate error
+          assert:
+            that: fail_dhcp_range in dhcp_start_ip_check.msg
+            success_msg: "{{ input_config_check_success_msg }}"
+            fail_msg: "{{ input_config_check_fail_msg }}"
+      tags: TC_032
+
+    - name: Delete "{{ new_input_config_filename }}"
+      file:
+        path: "{{ new_input_config_filename }}"
+        state: absent
+      tags: TC_032
+
+# Testcase OMNIA_DIO_US_DVC_TC_033
+#Execute common role with dhcp_end_ip_range in wrong format
+- name: OMNIA_DIO_US_DVC_TC_033
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_common_vars.yml
+    - ../roles/common/vars/main.yml
+  tasks:
+    - name: Delete "{{ new_input_config_filename }}"
+      file:
+        path: "{{ new_input_config_filename }}"
+        state: absent
+      tags: TC_033
+
+    - name: Copy "{{ empty_input_config_filename }}" to new file
+      copy:
+        src: "{{ empty_input_config_filename }}"
+        dest: "{{ new_input_config_filename }}"
+      tags: TC_033
+
+    - name: Edit "{{ new_input_config_filename }}"
+      replace:
+        path: "{{ new_input_config_filename }}"
+        regexp: "{{ item.regexp }}"
+        replace: "{{ item.replace }}"
+      with_items:
+        - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
+        - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ wrong_dhcp_ip }}\"" }
+      tags: TC_033
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+            tasks_from: "{{ password_config_file }}"
+          vars:
+            input_config_filename: "{{ new_input_config_filename }}"
+      rescue:
+        - name: Validate error
+          assert:
+            that: fail_dhcp_range in dhcp_start_ip_check.msg
+            success_msg: "{{ input_config_check_success_msg }}"
+            fail_msg: "{{ input_config_check_fail_msg }}"
+      tags: TC_033
+
+    - name: Delete "{{ new_input_config_filename }}"
+      file:
+        path: "{{ new_input_config_filename }}"
+        state: absent
+      tags: TC_033
+
+# Testcase OMNIA_DIO_US_DVC_TC_034
+#Execute common role with dhcp_end_ip_range in wrong ip range
+- name: OMNIA_DIO_US_DVC_TC_034
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_common_vars.yml
+    - ../roles/common/vars/main.yml
+  tasks:
+    - name: Delete "{{ new_input_config_filename }}"
+      file:
+        path: "{{ new_input_config_filename }}"
+        state: absent
+      tags: TC_034
+
+    - name: Copy "{{ empty_input_config_filename }}" to new file
+      copy:
+        src: "{{ empty_input_config_filename }}"
+        dest: "{{ new_input_config_filename }}"
+      tags: TC_034
+
+    - name: Edit "{{ new_input_config_filename }}"
+      replace:
+        path: "{{ new_input_config_filename }}"
+        regexp: "{{ item.regexp }}"
+        replace: "{{ item.replace }}"
+      with_items:
+        - { regexp: "provision_password: \"\"", replace: "provision_password: \"{{ min_length_password }}\"" }
+        - { regexp: "awx_password: \"\"", replace: "awx_password: \"{{ min_length_password }}\"" }
+        - { regexp: "mapping_file_exists: \"\"", replace: "mapping_file_exists: \"false\"" }
+        - { regexp: "dhcp_start_ip_range: \"\"", replace: "dhcp_start_ip_range: \"{{ valid_dhcp_start_range }}\"" }
+        - { regexp: "dhcp_end_ip_range: \"\"", replace: "dhcp_end_ip_range: \"{{ invalid_dhcp_ip }}\"" }
+      tags: TC_034
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+            tasks_from: "{{ password_config_file }}"
+          vars:
+            input_config_filename: "{{ new_input_config_filename }}"
+      rescue:
+        - name: Validate error
+          assert:
+            that: fail_dhcp_range in dhcp_start_ip_check.msg
+            success_msg: "{{ input_config_check_success_msg }}"
+            fail_msg: "{{ input_config_check_fail_msg }}"
+      tags: TC_034
+
+    - name: Delete "{{ new_input_config_filename }}"
+      file:
+        path: "{{ new_input_config_filename }}"
+        state: absent
+      tags: TC_034
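
Note: TC_031/TC_032 cover dhcp_start_ip_range and TC_033/TC_034 cover dhcp_end_ip_range, using invalid_dhcp_ip for out-of-range values and wrong_dhcp_ip for malformed ones. The validation itself lives in the common role's password_config tasks, which are outside this diff; a check of this shape could be sketched with the ipaddr filter (illustrative only, requires the netaddr Python package on the control node):

- name: Validate dhcp range bounds
  assert:
    that:
      - dhcp_start_ip_range | ipaddr
      - dhcp_end_ip_range | ipaddr
    success_msg: "{{ success_dhcp_range }}"
    fail_msg: "{{ fail_dhcp_range }}"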

+ 1 - 37
appliance/test/test_provision_cdip.yml

@@ -78,24 +78,6 @@
         success_msg: "{{ cobbler_cnt_success_msg }}"
       tags: TC_001,VERIFY_001
 
-    - name: Validate first NIC is not assigned to public internet
-      shell: |
-        set -o pipefail
-        ip route get 8.8.8.8 | awk '{print $5}'
-      register: nic_output
-      args:
-        executable: /bin/bash
-      failed_when: first_nic in nic_output.stdout
-      changed_when: false
-      tags: TC_001,VERIFY_001
-
-    - name: "Validate NIC-1 is assigned to IP {{ nic1_ip_address }}"
-      assert:
-        that: "'{{ nic1_ip_address }}' in ansible_em1.ipv4.address"
-        fail_msg: "{{ nic_check_fail_msg }}"
-        success_msg: "{{ nic_check_success_msg }}"
-      tags: TC_001,VERIFY_001
-
 # Testcase OMNIA_DIO_US_CDIP_TC_002
 # Execute provision role in management station where cobbler container and image already created
 - name: OMNIA_DIO_US_CDIP_TC_002
@@ -145,24 +127,6 @@
         success_msg: "{{ cobbler_cnt_success_msg }}"
       tags: TC_002,VERIFY_002
 
-    - name: Validate first NIC is not assigned to public internet
-      shell: |
-        set -o pipefail
-        ip route get 8.8.8.8 | awk '{print $5}'
-      register: nic_output
-      args:
-        executable: /bin/bash
-      failed_when: first_nic in nic_output.stdout
-      changed_when: false
-      tags: TC_002,VERIFY_002
-
-    - name: "Validate NIC-1 is assigned to IP {{ nic1_ip_address }}"
-      assert:
-        that: "'{{ nic1_ip_address }}' in ansible_em1.ipv4.address"
-        fail_msg: "{{ nic_check_fail_msg }}"
-        success_msg: "{{ nic_check_success_msg }}"
-      tags: TC_002,VERIFY_002
-
 # Testcase OMNIA_DIO_US_CDIP_TC_003
 # Execute provision role in management station where docker service not running
 - name: OMNIA_DIO_US_CDIP_TC_003
@@ -216,4 +180,4 @@
           service:
             name: docker
             state: started
-      tags: TC_003
+      tags: TC_003

+ 9 - 5
appliance/test/test_vars/test_common_vars.yml

@@ -15,9 +15,9 @@
 
 # vars file for test_common.yml file
 centos_version: '7.8'
-test_input_config_filename: "input_config_test.yml"
-empty_input_config_filename: "input_config_empty.yml"
-new_input_config_filename: "input_config_new.yml"
+test_input_config_filename: "appliance_config_test.yml"
+empty_input_config_filename: "appliance_config_empty.yml"
+new_input_config_filename: "appliance_config_new.yml"
 password_config_file: "password_config"
 min_length_password: "testpass"
 max_length_password: "helloworld123helloworld12hello"
@@ -25,6 +25,10 @@ long_password: "helloworld123hellowordl12hello3"
 white_space_password: "hello world 123"
 special_character_password1: "hello-world/"
 special_character_password2: "hello@$%!world"
+valid_dhcp_start_range: "172.17.0.10"
+valid_dhcp_end_range: "172.17.0.200"
+invalid_dhcp_ip: "1720.1700.1000.1000"
+wrong_dhcp_ip: "d6:dh1:dsj:10"
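+# Note: invalid_dhcp_ip has octets outside the 0-255 range, while wrong_dhcp_ip is
+# not in IPv4 dotted-decimal form at all; both values are expected to fail validation.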
 
 docker_volume_success_msg: "Docker volume omnia-storage exists"
 docker_volume_fail_msg: "Docker volume omnia-storage does not exist"
@@ -38,7 +42,7 @@ different_user_check_success_msg: "Different user execution check passed"
 different_user_check_fail_msg: "Different user execution check failed"
 selinux_check_success_msg: "selinux check passed"
 selinux_check_fail_msg: "selinux check failed"
-input_config_check_success_msg: "input_config.yml validation passed"
-input_config_check_fail_msg: "input_config.yml validation failed"
+input_config_check_success_msg: "appliance_config.yml validation passed"
+input_config_check_fail_msg: "appliance_config.yml validation failed"
 install_package_success_msg: "Installation of package is successful"
 install_package_fail_msg: "Installation of package failed"

+ 4 - 4
appliance/test/test_vars/test_provision_vars.yml

@@ -25,14 +25,14 @@ cobbler_cnt_success_msg: "Docker container cobbler exists"
 nic_check_fail_msg: "NIC-1 ip address validation failed"
 nic_check_success_msg: "NIC-1 ip address validation successful"
 cobbler_image_files:
- - configure_nic
  - check_prerequisites
  - mount_iso
  - firewall_settings
  - provision_password
+ - dhcp_configure
  - cobbler_image
 password_config_file: "password_config"
-test_input_config_filename: "input_config_test.yml"
+test_input_config_filename: "appliance_config_test.yml"
 
 # Usage: test_provision_cc.yml
 docker_check_success_msg: "Docker service stopped use case validation successful"
@@ -57,8 +57,8 @@ crontab_list_fail_msg: "Crontab list validation failed"
 crontab_list_success_msg: "Crontab list validation successful"
 iso_check_fail_msg: "centos iso file check validation failed"
 iso_check_success_msg: "centos iso file check validation successful"
-cobbler_service_check_fail_msg: "TFTP service validation failed"
-cobbler_service_check_success_msg: "TFTP service validation successful"
+cobbler_service_check_fail_msg: "cobbler service validation failed"
+cobbler_service_check_success_msg: "cobbler service validation successful"
 kickstart_filename: "centos7.ks"
 iso_file_path: "../roles/provision/files"
 temp_iso_name: "temp_centos.iso"

+ 25 - 0
appliance/tools/provision_host_report.j2

@@ -0,0 +1,25 @@
+HPC Cluster
+-----------
+Reachable Hosts:
+{% if reachable_host_number | int > 0 %}
+{% for host in groups['reachable'] %}
+  inet={{ host }}, link/ether={{ ethernet_detail_reachable.results[loop.index0].stdout | replace(';','') }}
+{% endfor %}
+{% endif %}
+Total reachable hosts: {{ reachable_host_number }}
+
+Unreachable Hosts:
+{% if unreachable_host_number | int > 0 %}
+{% for host in groups['ungrouped'] %}
+  inet={{ host }}, link/ether={{ ethernet_detail_unreachable.results[loop.index0].stdout | replace(';','') }}
+{% endfor %}
+{% endif %}
+Total unreachable hosts: {{ unreachable_host_number }}
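Because the ethernet-detail facts are registered from `with_items` loops, the results are always a `results` list, so indexing with `loop.index0` covers both the single-host and multi-host cases. For illustration, with two reachable hosts and one unreachable host, the rendered report would look roughly like the following (IP and MAC values are made up):

```
HPC Cluster
-----------
Reachable Hosts:
  inet=172.17.0.10, link/ether=52:54:00:aa:bb:01
  inet=172.17.0.11, link/ether=52:54:00:aa:bb:02
Total reachable hosts: 2

Unreachable Hosts:
  inet=172.17.0.12, link/ether=52:54:00:aa:bb:03
Total unreachable hosts: 1
```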

+ 83 - 0
appliance/tools/provision_report.yml

@@ -0,0 +1,83 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+# This playbook generates a report of the reachable and unreachable hosts in the HPC cluster
+# It can be run only if provisioned_hosts.yml has been created at omnia/appliance/roles/inventory/files/provisioned_hosts.yml
+
+# Command to execute: ansible-playbook provision_report.yml -i ../roles/inventory/files/provisioned_hosts.yml
+
+- name: Find reachable hosts
+  hosts: all
+  gather_facts: false
+  ignore_unreachable: true
+  ignore_errors: true
+  tasks:
+    - name: Check for reachable nodes
+      command: ping -c1 {{ inventory_hostname }}
+      delegate_to: localhost
+      register: ping_result
+      ignore_errors: yes
+      changed_when: false
+
+    - name: Group reachable hosts
+      group_by:
+        key: "reachable"
+      when: "'100% packet loss' not in ping_result.stdout"
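+    # Hosts that answer the ping are collected into the dynamic "reachable" group.
+    # Assuming provisioned_hosts.yml defines no groups of its own, every host that
+    # never answers stays in Ansible's built-in "ungrouped" group, which the next
+    # play treats as the unreachable set.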
+
+- name: Display hosts list
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+    - name: Set reachable and unreachable host number
+      set_fact:
+        reachable_host_number: "{{ groups['reachable'] | default([]) | length }}"
+        unreachable_host_number: "{{ groups['ungrouped'] | length }}"
+
+    - name: Copy dhcpd.leases from cobbler
+      command: docker cp cobbler:/var/lib/dhcpd/dhcpd.leases dhcpd.leases
+      changed_when: true
+
+    - name: Fetch ethernet details of unreachable hosts
+      shell: sed -n '/{{ item }}/,/ethernet/p' dhcpd.leases | grep "ethernet" | awk '{ print $3 }' | uniq
+      register: ethernet_detail_unreachable
+      changed_when: false
+      args:
+        warn: no
+      with_items:
+        - "{{ groups['ungrouped'] }}"
+
+    - name: Fetch ethernet details of reachable hosts
+      shell: sed -n '/{{ item }}/,/ethernet/p' dhcpd.leases | grep "ethernet" | awk '{ print $3 }' | uniq
+      register: ethernet_detail_reachable
+      changed_when: false
+      args:
+        warn: no
+      with_items:
+        - "{{ groups['reachable'] }}"
+
+    - name: Copy host information to file
+      template:
+        src: provision_host_report.j2
+        dest: provision_host_report.txt
+
+    - name: Read provision host report
+      command: cat provision_host_report.txt
+      register: host_report
+      changed_when: false
+
+    - name: Display provision host report
+      debug:
+        var: host_report.stdout_lines
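The two "Fetch ethernet details" tasks above scrape the copied `dhcpd.leases` file. As context for the `sed`/`awk` pipeline, a typical ISC DHCP lease entry looks like this sketch (values are illustrative):

```
lease 172.17.0.11 {
  starts 4 2021/02/11 09:05:22;
  ends 4 2021/02/11 21:05:22;
  hardware ethernet 52:54:00:aa:bb:02;
  client-hostname "compute0-11";
}
```

The `sed -n '/<ip>/,/ethernet/p'` range print stops at the `hardware ethernet` line, and `awk '{ print $3 }'` picks out the MAC address together with its trailing semicolon, which is why the report template strips `;` with `replace(';','')`.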

+ 23 - 26
docs/INSTALL_OMNIA.md

@@ -3,30 +3,28 @@
 The following sections provide details on installing Omnia using CLI. If you want to install the Omnia appliance and manage workloads using the Omnia appliance, see [INSTALL_OMNIA_APPLIANCE](INSTALL_OMNIA_APPLIANCE.md) and [MONITOR_CLUSTERS](MONITOR_CLUSTERS.md) files for more information.
 
 ## Prerequisites to install Omnia using CLI
-Ensure that all the prequisites listed in the [PREINSTALL_OMNIA](PREINSTALL_OMNIA.md) file are met before installing Omnia.
+Ensure that all the prerequisites listed in the [PREINSTALL_OMNIA](PREINSTALL_OMNIA.md) file are met before installing Omnia.
 
 ## Steps to install Omnia using CLI
-__Note:__ The user should have root privileges to perform installations and configurations.  
-__Note:__ If there are errors when any of the following Ansible playbook commands are executed, re-run the commands again.
+__Note:__ If there are errors when any of the following Ansible playbook commands are run, re-run the commands.  
+__Note:__ The user should have root privileges to perform installations and configurations.
 
-1. On the manager node, change the working directory to the directory where you want to clone the Omnia Git repository.
-2. Clone the Omnia repository.
+1. Clone the Omnia repository.
 ``` 
 $ git clone https://github.com/dellhpc/omnia.git 
 ```
 __Note:__ After the Omnia repository is cloned, a folder named __omnia__ is created. It is recommended that you do not rename this folder.
 
-3. Change the directory to __omnia__, by executing the following command:
-   `cd omnia`
+2. Change the directory to __omnia__: `cd omnia`
 
-4. An inventory file must be created in the __omnia__ folder. Add compute node IPs under **[compute]** group and the manager node IP under **[manager]** group. See the template INVENTORY file under `omnia\docs` folder.
+3. An inventory file must be created in the __omnia__ folder. Add compute node IPs under **[compute]** group and the manager node IP under **[manager]** group. See the INVENTORY template file under `omnia\docs` folder.
 
-5. To install Omnia, run the following command:
+4. To install Omnia, run the following command.
 ```
 ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2" 
 ```
 
-6. By default, no skip tags are selected and both Kubernetes and Slurm will be deployed.  
+5. By default, no skip tags are selected and both Kubernetes and Slurm will be deployed.  
 To skip the installation of Kubernetes, enter:  
 `ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2"  --skip-tags "kubernetes"`  
 Similarly, to skip Slurm, enter:  
@@ -34,15 +32,14 @@ Similarly, to skip Slurm, enter:
 __Note:__ If you would like to skip the NFS client setup, enter the following command to skip the k8s_nfs_client_setup role of Kubernetes:  
 `ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2"  --skip-tags "nfs_client"`
 
-7. To provide password for mariaDB Database for Slurm accounting and Kubernetes CNI, edit the `omnia_config.yml` file.  
+6. To provide password for mariaDB Database for Slurm accounting and Kubernetes CNI, edit the `omnia_config.yml` file.  
 __Note:__ Supported Kubernetes CNIs: calico and flannel. The default CNI is calico.  
 To view the set passwords of omnia_config.yml at a later time, run the following command:  
 `ansible-vault view omnia_config.yml --vault-password-file .omnia_vault_key`
 
-Omnia considers the following usernames as default:
-* `slurm` for MariaDB
+Omnia considers `slurm` as the default username for MariaDB.  
 
-The following __kubernetes__ roles are provided by Omnia when __omnia.yml__ file is executed:
+The following __kubernetes__ roles are provided by Omnia when __omnia.yml__ file is run:
 - __common__ role:
 	- Install common packages on manager and compute nodes
 	- Docker is installed
@@ -50,7 +47,7 @@ The following __kubernetes__ roles are provided by Omnia when __omnia.yml__ file
 	- Install Nvidia drivers and software components
 - **k8s_common** role: 
 	- Required Kubernetes packages are installed
-	- Starts the docker and kubernetes services.
+	- Starts the docker and Kubernetes services.
 - **k8s_manager** role: 
 	- __helm__ package for Kubernetes is installed.
 - **k8s_firewalld** role: This role is used to enable the required ports to be used by Kubernetes. 
@@ -70,24 +67,24 @@ The following __kubernetes__ roles are provided by Omnia when __omnia.yml__ file
 - **k8s_start_services** role
 	- Kubernetes services are deployed such as Kubernetes Dashboard, Prometheus, MetalLB and NFS client provisioner
 
-__Note:__ After Kubernetes is installed and configured, few Kubernetes and calico/flannel related ports will be opened in the manager and compute nodes. This is required for Kubernetes Pod-to-Pod and Pod-to-Service communications. Calico/flannel provides a full networking stack for Kubernetes pods.
+__Note:__ After Kubernetes is installed and configured, few Kubernetes and calico/flannel related ports are opened in the manager and compute nodes. This is required for Kubernetes Pod-to-Pod and Pod-to-Service communications. Calico/flannel provides a full networking stack for Kubernetes pods.
 
-The following __Slurm__ roles are provided by Omnia when __omnia.yml__ file is executed:
+The following __Slurm__ roles are provided by Omnia when __omnia.yml__ file is run:
 - **slurm_common** role:
-	- Install the common packages on manager node and compute node.
+	- Installs the common packages on manager node and compute node.
 - **slurm_manager** role:
-	- Install the packages only related to manager node
-	- This role also enables the required ports to be used by slurm.  
+	- Installs the packages only related to manager node
+	- This role also enables the required ports to be used by Slurm.  
 	    **tcp_ports**: 6817,6818,6819  
 		**udp_ports**: 6817,6818,6819
-	- Creating and updating the slurm configuration files based on the manager node requirements.
+	- Creating and updating the Slurm configuration files based on the manager node requirements.
 - **slurm_workers** role:
-	- Install the slurm packages into all compute nodes as per the compute node requirements.
+	- Installs the Slurm packages into all compute nodes as per the compute node requirements.
 - **slurm_start_services** role: 
-	- Starting the slurm services so that compute node starts to communicate with manager node.
+	- Starting the Slurm services so that compute node communicates with manager node.
 - **slurm_exporter** role: 
-	- slurm exporter is a package for exporting metrics collected from slurm resource scheduling system to prometheus.
-	- Slurm exporter is installed on the host just like slurm and slurm exporter will be successfully installed only if slurm is installed.
+	- Slurm exporter is a package for exporting metrics collected from Slurm resource scheduling system to prometheus.
+	- Slurm exporter is installed on the host like Slurm, and Slurm exporter will be successfully installed only if Slurm is installed.
 
 **Note:** If you want to install JupyterHub and Kubeflow playbooks, you have to first install the JupyterHub playbook and then install the Kubeflow playbook.
 
@@ -97,4 +94,4 @@ Commands to install JupyterHub and Kubeflow:
 
 ## Adding a new compute node to the cluster
 
-The user has to update the INVENTORY file present in omnia directory with the new node IP address in the compute group. Then, omnia.yml has to be executed to add the new node to the cluster and update the configurations of the manager node.
+The user has to update the INVENTORY file present in `omnia` directory with the new node IP address in the compute group. Then, `omnia.yml` has to be run to add the new node to the cluster and update the configurations of the manager node.
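As an illustration (IP addresses made up), the updated inventory might look like:

```
[manager]
172.17.0.2

[compute]
172.17.0.10
172.17.0.11
# newly added node
172.17.0.12
```

Re-running `ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2"` then brings the new node into the cluster.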

+ 44 - 49
docs/INSTALL_OMNIA_APPLIANCE.md

@@ -1,24 +1,23 @@
 # Install the Omnia appliance
 
 ## Prerequisites
-Ensure that all the prequisites listed in the [PREINSTALL_OMNIA_APPLIANCE](PREINSTALL_OMNIA_APPLIANCE.md) file are met before installing Omnia appliance
-
-__Note:__ Changing the manager node after installation of Omnia is not supported by Omnia. If you want to change the manager node, you must redeploy the entire cluster.
+Ensure that all the prerequisites listed in the [PREINSTALL_OMNIA_APPLIANCE](PREINSTALL_OMNIA_APPLIANCE.md) file are met before installing the Omnia appliance.
 
+__Note:__ Changing the manager node after the installation of Omnia is not supported by Omnia. If you want to change the manager node, you must redeploy the entire cluster.  
+__Note:__ The user should have root privileges to perform installations and configurations.
 
 ## Steps to install the Omnia appliance
-__Note:__ The user should have root privileges to perform installations and configurations using Omnia.
-__Note:__ If there are errors when any of the following Ansible playbook commands are executed, re-run the commands again.  
-
-1. Clone the Omnia repository.
+__Note:__ If there are errors when any of the following Ansible playbook commands are run, re-run the commands.
+1. On the management node, change the working directory to the directory where you want to clone the Omnia Git repository.
+2. Clone the Omnia repository.
 ``` 
 $ git clone https://github.com/dellhpc/omnia.git 
 ```
 __Note:__ After the Omnia repository is cloned, a folder named __omnia__ is created. It is recommended that you do not rename this folder.
 
-2. Change the directory to `omnia/appliance`
-3. To provide passwords for Cobbler and AWX, edit the __`appliance_config.yml`__ file.
-* If user want to provide the mapping file for DHCP configuration, go to  __appliance_config.yml__ file there is variable name __mapping_file_exits__ set as __true__ otherwise __false__.
+3. Change the directory to `omnia/appliance`
+4. To provide passwords for Cobbler and AWX, edit the `appliance_config.yml` file.
+* To provide a mapping file for DHCP configuration, open the **appliance_config.yml** file and set the variable named **mapping_file_exists** to __true__; otherwise, set it to __false__.
 
 Omnia considers the following usernames as default:  
 * `cobbler` for Cobbler Server
@@ -26,28 +25,26 @@ Omnia considers the following usernames as default:
 * `slurm` for MariaDB
 
 **Note**: 
-* Minimum length of the password must be at least eight characters and maximum of 30 characters.
+* Minimum length of the password must be at least eight characters and a maximum of 30 characters.
 * Do not use these characters while entering a password: -, \\, "", and \'
 
-4. Using the `appliance_config.yml` file, you can also change the NIC for the DHCP server under *hpc_nic* and the NIC used to connect to the Internet under public_nic. Default values of both __hpc_nic__ and __public_nic__ is set to em1 and em2 respectively.
-5. The valid DHCP range for HPC cluster is set into two variables name __Dhcp_start_ip_range__ and __Dhcp_end_ip_range__ present in the __appliance_config.yml__ file.
-6. To provide password for mariaDB Database for Slurm accounting and Kubernetes CNI, edit the __`omnia_config.yml`__ file.
+5. Using the `appliance_config.yml` file, you can change the NIC for the DHCP server under **hpc_nic** and the NIC used to connect to the Internet under **public_nic**. Default values of **hpc_nic** and **public_nic** are set to em1 and em2 respectively.
+6. The valid DHCP range for HPC cluster is set in two variables named __Dhcp_start_ip_range__ and __Dhcp_end_ip_range__ present in the `appliance_config.yml` file.
+7. To provide passwords for mariaDB Database for Slurm accounting and Kubernetes CNI, edit the `omnia_config.yml` file.
 
-**Note**:
-* Supported Kubernetes CNI : calico and flannel, default is __calico__.
+__Note:__ Supported Kubernetes CNIs: calico and flannel. The default CNI is calico.
 
-To view the set passwords of __`appliance_config.yml`__ at a later time, run the following command under omnia->appliance:
+To view the set passwords of `appliance_config.yml`, run the following command under omnia->appliance:
 ```
 ansible-vault view appliance_config.yml --vault-password-file .vault_key
 ```
 
-To view the set passwords of __`omnia_config.yml`__ at a later time, run the following command:
+To view the set passwords of `omnia_config.yml`, run the following command:
 ```
 ansible-vault view omnia_config.yml --vault-password-file .omnia_vault_key
 ```
 
-  
-7. To install Omnia, run the following command:
+8. To install Omnia, run the following command:
 ```
 ansible-playbook appliance.yml -e "ansible_python_interpreter=/usr/bin/python2"
 ```
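As a sketch of the outcome of steps 4 through 6 (all values are illustrative, and the real file contains more fields), the edited `appliance_config.yml` might contain:

```
provision_password: "omnia1234"
awx_password: "omnia1234"
hpc_nic: "em1"
public_nic: "em2"
mapping_file_exists: "false"
dhcp_start_ip_range: "172.17.0.10"
dhcp_end_ip_range: "172.17.0.200"
```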
@@ -55,28 +52,27 @@ ansible-playbook appliance.yml -e "ansible_python_interpreter=/usr/bin/python2"
 Omnia creates a log file which is available at: `/var/log/omnia.log`.
 
 **Provision operating system on the target nodes**  
-Omnia role used: *provision*
-Ports used by __Cobbler__
+Omnia role used: *provision*  
+Ports used by __Cobbler__:  
 * __TCP__ ports: 80,443,69
 * __UDP__ ports: 69,4011
 
 To create the Cobbler image, Omnia configures the following:
-* Firewall settings are configured.
+* Firewall settings.
 * The kickstart file of Cobbler will enable the UEFI PXE boot.
 
 To access the Cobbler dashboard, enter `https://<IP>/cobbler_web` where `<IP>` is the Global IP address of the management node. For example, enter
 `https://100.98.24.225/cobbler_web` to access the Cobbler dashboard.
 
-__Note__: If a mapping file is not provided, the hostname to the server is given on the basis of following format: __compute<xxx>-<xxx>__ where "xxx" is the last 2 octets of Host Ip address
-After the Cobbler Server provisions the operating system on the nodes, IP addresses and host names are assigned by the DHCP service. The host names are assigned based on the following format: **compute\<xxx>-xxx** where **xxx** is the Host ID (last 2 octet) of the Host IP address. For example, if the Host IP address is 172.17.0.11 then assigned hostname will be compute0-11.
-__Note__: If a mapping file is provided, the hostnames follow the format provided in the mapping file.
+__Note__: After the Cobbler Server provisions the operating system on the nodes, IP addresses and host names are assigned by the DHCP service.  
+* If a mapping file is not provided, the server hostname is assigned based on the following format: **computexxx-xxx** where "xxx-xxx" is the last two octets of the Host IP address. For example, if the Host IP address is 172.17.0.11, the hostname assigned by Omnia is compute0-11.  
+* If a mapping file is provided, the hostnames follow the format provided in the mapping file.
 
 **Install and configure Ansible AWX**  
-Omnia role used: *web_ui*
+Omnia role used: *web_ui*  
 Port used by __AWX__ is __8081__.  
 AWX repository is cloned from the GitHub path: https://github.com/ansible/awx.git 
 
-
 Omnia performs the following configuration on AWX:
 * The default organization name is set to **Dell EMC**.
 * The default project name is set to **omnia**.
@@ -87,10 +83,9 @@ Omnia performs the following configuration on AWX:
 
 To access the AWX dashboard, enter `http://<IP>:8081` where **\<IP>** is the Global IP address of the management node. For example, enter `http://100.98.24.225:8081` to access the AWX dashboard.
 
-***Note**: The AWX configurations are automatically performed Omnia and Dell Technologies recommends that you do not change the default configurations provided by Omnia as the functionality may be impacted.
-
-__Note__: Although AWX UI is accessible, hosts will be shown only after few nodes have been provisioned by a cobbler. It will take approx 10-15 mins. If any server is provisioned but user is not able to see any host on the AWX UI, then user can run __provision_report.yml__ playbook from __omnia__ -> __appliance__ ->__tools__ folder to see which hosts are reachable.
+**Note**: The AWX configurations are automatically performed by Omnia, and Dell Technologies recommends that you do not change the default configurations provided by Omnia as the functionality may be impacted.
 
+__Note__: Although the AWX UI is accessible, hosts will be shown only after a few nodes have been provisioned by Cobbler. It takes approximately 10 to 15 minutes to display the host details after provisioning by Cobbler. If a server is provisioned but you are unable to view the host details on the AWX UI, then you can run the **provision_report.yml** playbook from the __omnia__ -> __appliance__ -> __tools__ folder to view the hosts which are reachable.
 
 ## Install Kubernetes and Slurm using AWX UI
 Kubernetes and Slurm are installed by deploying the **DeployOmnia** template on the AWX dashboard.
@@ -103,21 +98,21 @@ Kubernetes and Slurm are installed by deploying the **DeployOmnia** template on
 6. By default, no skip tags are selected and both Kubernetes and Slurm will be deployed. To install only Kubernetes, enter `slurm` and select **Create "slurm"**. Similarly, to install only Slurm, select and add `kubernetes` skip tag. 
 
 __Note:__
-*	If you would like to skip the NFS client setup, enter _nfs_client in the skip tag section to skip the k8s_nfs_client_setup__ role of Kubernetes.
+*	If you would like to skip the NFS client setup, enter `nfs_client` in the skip tag section to skip the **k8s_nfs_client_setup** role of Kubernetes.
 
 7. Click **Next**.
 8. Review the details in the **Preview** window, and click **Launch** to run the DeployOmnia template. 
 
 To establish the passwordless communication between compute nodes and manager node:
 1. In AWX UI, under __RESOURCES__ -> __Templates__, select __DeployOmnia__ template.
-2. From __Playbook dropdown__ menu, select __appliance/tools/passwordless_ssh.yml__ and __Launch__ the template.
+2. From __Playbook dropdown__ menu, select __appliance/tools/passwordless_ssh.yml__ and launch the template.
 
 __Note:__ If you want to install __JupyterHub__ and __Kubeflow__ playbooks, you have to first install the __JupyterHub__ playbook and then install the __Kubeflow__ playbook.
 
 __Note:__ To install __JupyterHub__ and __Kubeflow__ playbooks:
 *	From __AWX UI__, under __RESOURCES__ -> __Templates__, select __DeployOmnia__ template.
-*	From __Playbook dropdown__ menu, select __platforms/jupyterhub.yml__ option and __Launch__ the template to install jupyterhub playbook.
-*	From __Playbook dropdown__ menu, select __platforms/kubeflow.yml__ option and __Launch__ the template to install kubeflow playbook.
+*	From __Playbook dropdown__ menu, select __platforms/jupyterhub.yml__ option and launch the template to install JupyterHub playbook.
+*	From __Playbook dropdown__ menu, select __platforms/kubeflow.yml__ option and launch the template to install Kubeflow playbook.
 
 
 The DeployOmnia template may not run successfully if:
@@ -125,9 +120,9 @@ The DeployOmnia template may not run successfully if:
 - The Compute group does not contain a host. Ensure that the Compute group is assigned a minimum of one host node.
 - Under Skip Tags, when both kubernetes and slurm tags are selected.
 
-After **DeployOmnia** template is executed from the AWX UI, the **omnia.yml** file installs Kubernetes and Slurm, or either Kubernetes or slurm, as per the selection in the template on the management node. Additionally, appropriate roles are assigned to the compute and manager groups.
+After **DeployOmnia** template is run from the AWX UI, the **omnia.yml** file installs Kubernetes and Slurm, or either Kubernetes or Slurm, as per the selection in the template on the management node. Additionally, appropriate roles are assigned to the compute and manager groups.
 
-The following __kubernetes__ roles are provided by Omnia when __omnia.yml__ file is executed:
+The following __kubernetes__ roles are provided by Omnia when __omnia.yml__ file is run:
 - __common__ role:
 	- Install common packages on manager and compute nodes
 	- Docker is installed
@@ -135,7 +130,7 @@ The following __kubernetes__ roles are provided by Omnia when __omnia.yml__ file
 	- Install Nvidia drivers and software components
 - **k8s_common** role: 
 	- Required Kubernetes packages are installed
-	- Starts the docker and kubernetes services.
+	- Starts the docker and Kubernetes services.
 - **k8s_manager** role: 
 	- __helm__ package for Kubernetes is installed.
 - **k8s_firewalld** role: This role is used to enable the required ports to be used by Kubernetes. 
@@ -155,25 +150,25 @@ The following __kubernetes__ roles are provided by Omnia when __omnia.yml__ file
 - **k8s_start_services** role
 	- Kubernetes services are deployed such as Kubernetes Dashboard, Prometheus, MetalLB and NFS client provisioner
 
-__Note:__ After Kubernetes is installed and configured, few Kubernetes and calico/flannel related ports will be opened in the manager and compute nodes. This is required for Kubernetes Pod-to-Pod and Pod-to-Service communications. Calico/flannel provides a full networking stack for Kubernetes pods.
+__Note:__ After Kubernetes is installed and configured, few Kubernetes and calico/flannel related ports are opened in the manager and compute nodes. This is required for Kubernetes Pod-to-Pod and Pod-to-Service communications. Calico/flannel provides a full networking stack for Kubernetes pods.
 
-The following __Slurm__ roles are provided by Omnia when __omnia.yml__ file is executed:
+The following __Slurm__ roles are provided by Omnia when __omnia.yml__ file is run:
 - **slurm_common** role:
-	- Install the common packages on manager node and compute node.
+	- Installs the common packages on manager node and compute node.
 - **slurm_manager** role:
-	- Install the packages only related to manager node
-	- This role also enables the required ports to be used by slurm.  
+	- Installs the packages only related to manager node
+	- This role also enables the required ports to be used by Slurm.  
 	    **tcp_ports**: 6817,6818,6819  
 		**udp_ports**: 6817,6818,6819
-	- Creating and updating the slurm configuration files based on the manager node requirements.
+	- Creating and updating the Slurm configuration files based on the manager node requirements.
 - **slurm_workers** role:
-	- Install the slurm packages into all compute nodes as per the compute node requirements.
+	- Installs the Slurm packages into all compute nodes as per the compute node requirements.
 - **slurm_start_services** role: 
-	- Starting the slurm services so that compute node starts to communicate with manager node.
+	- Starting the Slurm services so that compute node communicates with manager node.
 - **slurm_exporter** role: 
-	- slurm exporter is a package for exporting metrics collected from slurm resource scheduling system to prometheus.
-	- Slurm exporter is installed on the host just like slurm and slurm exporter will be successfully installed only if slurm is installed.
+	- Slurm exporter is a package for exporting metrics collected from Slurm resource scheduling system to prometheus.
+	- Slurm exporter is installed on the host like Slurm, and Slurm exporter will be successfully installed only if Slurm is installed.
 
 ## Adding a new compute node to the Cluster
 
-If a new node is provisioned through Cobbler, the node address is automatically displayed in AWX UI. This node does not belong to any group. The user can add the node to the compute group and execute __omnia.yml__ to add the new node to the cluster and update the configurations in the manager node.
+If a new node is provisioned through Cobbler, the node address is automatically displayed on the AWX dashboard. The node is not assigned to any group. You can add the node to the compute group and run `omnia.yml` to add the new node to the cluster and update the configurations in the manager node.

+ 11 - 5
docs/MONITOR_CLUSTERS.md

@@ -4,12 +4,17 @@ Omnia provides playbooks to configure additional software components for Kuberne
 __Note:__ To access the below dashboards, the user has to log in to the manager node and open the installed web browser.
 
 __Note:__ If you are connecting remotely, make sure your PuTTY or any other similar client supports X11 forwarding. If you are using MobaXterm version 8 and above, follow the steps below:
-1. `yum install firefox -y`
-2. `yum install xorg-x11-xauth`
-3. `logout and login back`
-4. To launch firefox from terminal use the following command: 
+1. Connect to the manager node over __ssh__ with X11 forwarding enabled:
+   `ssh -X root@<ip>` (where `<ip>` is the private IP of the manager node)
+2. `yum install firefox -y`
+3. `yum install xorg-x11-xauth`
+4. `export DISPLAY=:10.0`
+5. Log out and log in again.
+6. To launch Firefox from the terminal, use the following command: 
    `firefox&`
 
+__Note:__ Every time the user logs out, the __export DISPLAY=:10.0__ command has to be run again after logging back in.
+
 ## Access Kubernetes Dashboard
 1. To verify if the __Kubernetes-dashboard service__ is __running__, run the following command:
   `kubectl get pods --namespace kubernetes-dashboard`
@@ -26,12 +31,13 @@ __Note:__ If you are connecting remotely make sure your putty or any other simil
 ## Access Kubeflow Dashboard
 
 __Note:__ Use only a port number between __8000-8999__  
+__Note:__ Suggested port number: 8085
 
 1. To see which ports are in use, use the following command:
    `netstat -an`
 2. Choose a port number between __8000-8999__ that is not in use.
 3. To run the __kubeflow__ dashboard at the selected port number, run the following command:
-   `kubectl port-forward -n istio-system svc/istio-ingressgateway __selected-port-number__:80`
+   `kubectl port-forward -n kubeflow service/centraldashboard __selected_port_number__:80`
 4. On a web browser installed on the __manager node__, go to http://localhost:selected-port-number/ to launch the Kubeflow central navigation dashboard.
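+For example, with the suggested port number 8085, the command becomes
+`kubectl port-forward -n kubeflow service/centraldashboard 8085:80`, and the dashboard is then available at http://localhost:8085/.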
 
 ## Access JupyterHub Dashboard

+ 2 - 1
docs/PREINSTALL_OMNIA.md

@@ -8,7 +8,8 @@ Omnia assumes that prior to installation:
 * On the manager node, install Ansible and Git using the following commands:
 	* `yum install epel-release -y`
 	* `yum install ansible git -y`  
-__Note:__ Ansible must be installed using __yum__. If Ansible is installed using __pip3__, re-install it using the __yum__ command.
+__Note:__ Ansible must be installed using __yum__. If Ansible is installed using __pip3__, re-install it using the __yum__ command.
+
 
 ## Example system designs
 Omnia can configure systems which use Ethernet- or InfiniBand-based fabric to connect the compute servers.

+ 2 - 4
docs/README.md

@@ -19,11 +19,9 @@ Omnia can install Kubernetes or Slurm (or both), along with additional drivers,
 ![Omnia Slurm Stack](images/omnia-slurm.png) 
 
 ## Installing Omnia
-Omnia requires that servers already have an RPM-based Linux OS running on them, and are all connected to the Internet. Currently all Omnia testing is done on [CentOS](https://centos.org). Please see [PREINSTALL_OMNIA](PREINSTALL_OMNIA.md) for instructions on network setup.
+Omnia requires that servers already have an RPM-based Linux OS running on them, and are all connected to the Internet. Currently all Omnia testing is done on [CentOS](https://centos.org). Please see [PREINSTALL_OMNIA](PREINSTALL_OMNIA.md) for instructions on network setup.
 
-Once servers have functioning OS and networking, you can using Omnia to install and start Slurm and/or Kubernetes. Please see [INSTALL_OMNIA](INSTALL_OMNIA.md) for detailed instructions.
-
-To install the Omnia appliance, see [PREINSTALL_OMNIA_APPLIANCE](PREINSTALL_OMNIA_APPLIANCE.md) and [INSTALL_OMNIA_APPLIANCE](INSTALL_OMNIA_APPLIANCE.md) files.
+Once servers have functioning OS and networking, you can use Omnia to install and start Slurm and/or Kubernetes. Please see [INSTALL_OMNIA](INSTALL_OMNIA.md) for detailed instructions.
 
 # Support Matrix