浏览代码

Merge branch 'devel' into update_common

Lucas A. Wilson 3 年之前
父节点
当前提交
b01b4c195e

+ 6 - 3
control_plane/collect_device_info.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,9 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-- name: Dynamic Inventory
+
+- name: Collect control_plane device inventory
   hosts: localhost
   connection: local
-  gather_facts: no
+  gather_facts: false
   roles:
     - collect_device_info
+
+- import_playbook: "{{ playbook_dir }}/roles/collect_device_info/files/create_inventory.yml"

+ 4 - 3
control_plane/collect_node_info.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,9 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-- name: Dynamic Inventory
+
+- name: Collect control_plane host inventory
   hosts: localhost
   connection: local
-  gather_facts: no
+  gather_facts: false
   roles:
     - collect_node_info

+ 369 - 0
control_plane/roles/collect_device_info/files/create_inventory.yml

@@ -0,0 +1,369 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+
+# This role will not group the devices if the user provides invalid credentials
+
+- name: Create inventory in awx
+  hosts: device_inventory
+  connection: local
+  gather_facts: false
+  tasks:
+    - name: Include collect_device_info vars
+      include_vars: "{{ playbook_dir }}/../vars/main.yml"
+      run_once: true
+
+    - name: Include variable file base_vars.yml
+      include_vars: "{{ base_vars_file }}"
+      run_once: true
+
+    - name: Check if tower_config_file file is encrypted
+      command: cat "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      changed_when: false
+      no_log: true
+      register: tower_config_content
+      run_once: true
+
+    - name: Decrypt tower_config_file
+      command: >-
+        ansible-vault decrypt "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+        --vault-password-file "{{ playbook_dir }}/../../webui_awx/files/.tower_vault_key"
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in tower_config_content.stdout"
+      run_once: true
+
+    - name: Change file permissions
+      file:
+        path: "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+        mode: "{{ file_perm }}"
+      run_once: true
+
+    - name: Fetch awx host
+      command: grep "host:" "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      register: fetch_awx_host
+      changed_when: false
+      run_once: true
+
+    - name: Fetch awx username
+      command: grep "username:" "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      register: fetch_awx_username
+      changed_when: false
+      run_once: true
+      no_log: true
+
+    - name: Fetch awx password
+      command: grep "password:" "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      register: fetch_awx_password
+      changed_when: false
+      run_once: true
+      no_log: true
+
+    - name: Set awx variables
+      set_fact:
+        awx_host: "{{ fetch_awx_host.stdout | regex_replace('host: ','') }}"
+        awx_username: "{{ fetch_awx_username.stdout | regex_replace('username: ','') }}"
+        awx_password: "{{ fetch_awx_password.stdout | regex_replace('password: ','') }}"
+      no_log: true
+
+    - name: Encrypt tower_config_file
+      command: >-
+        ansible-vault encrypt "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+        --vault-password-file "{{ playbook_dir }}/../../webui_awx/files/.tower_vault_key"
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in tower_config_content.stdout"
+      run_once: true
+
+    - name: Change file permissions
+      file:
+        path: "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+        mode: "{{ file_perm }}"
+      run_once: true
+
+    - name: Check if {{ login_vars_file }} file is encrypted
+      command: cat {{ login_vars_file }}
+      changed_when: false
+      no_log: true
+      register: config_content
+      run_once: true
+
+    - name: Decrypt {{ login_vars_file }}
+      command: >-
+        ansible-vault decrypt {{ login_vars_file }}
+        --vault-password-file {{ login_vault_file }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      changed_when: false
+      run_once: true
+
+    - name: Include variable file {{ login_vars_file }}
+      include_vars: "{{ login_vars_file }}"
+      no_log: true
+      run_once: true
+
+    - name: Encrypt {{ login_vars_file }}
+      command: >-
+        ansible-vault encrypt {{ login_vars_file }}
+        --vault-password-file {{ login_vault_file }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      run_once: true
+
+    - name: Initialize variables
+      set_fact:
+        idrac_inventory_status: false
+        ethernet_inventory_status: false
+        ib_inventory_status: false
+        powervault_me4_status: false
+
+    - name: idrac_inventory validation tasks
+      block:
+        - name: Fetch the hosts in idrac_inventory
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts list --inventory idrac_inventory -f human --filter "name"
+          changed_when: false
+          no_log: true
+          run_once: true
+          register: idrac_hosts
+
+        - name: Assert idrac IP
+          dellemc.openmanage.idrac_system_info:
+            idrac_ip: "{{ inventory_hostname }}"
+            idrac_user: "{{ idrac_username }}"
+            idrac_password: "{{ idrac_password }}"
+          register: idrac_info
+          when: inventory_hostname not in idrac_hosts.stdout
+
+        - name: Set idrac_inventory_status
+          set_fact:
+            idrac_inventory_status: true
+          when:
+            - inventory_hostname not in idrac_hosts.stdout
+            - idrac_search_key in idrac_info.system_info.iDRAC[0].ProductInfo
+      rescue:
+        - name: Failed while adding device to idrac_inventory
+          debug:
+            msg: "{{ idrac_inventory_fail_msg }}"
+      when: not idrac_inventory_status
+
+    - name: Add host to awx idrac_inventory
+      block:
+        - name: Add the host to awx idrac_inventory if not present
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts create --name {{ inventory_hostname }} --inventory idrac_inventory
+          changed_when: true
+          no_log: true
+      rescue:
+        - name: Failed while adding device to idrac_inventory
+          debug:
+            msg: "{{ idrac_inventory_fail_msg }}"
+      when: idrac_inventory_status
+
+    - name: ethernet_inventory validation tasks
+      block:
+        - name: Fetch the hosts in ethernet inventory
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts list --inventory ethernet_inventory -f human --filter "name"
+          changed_when: false
+          no_log: true
+          run_once: true
+          register: ethernet_switches
+
+        - name: Assert ethernet switch
+          dellos10_command:
+            provider:
+              host: "{{ inventory_hostname }}"
+              username: "{{ ethernet_switch_username }}"
+              password: "{{ ethernet_switch_password }}"
+            commands: ['show version']
+          when: inventory_hostname not in ethernet_switches.stdout
+          register: dellswitch_info
+          no_log: true
+
+        - name: Set ethernet_inventory_status
+          set_fact:
+            ethernet_inventory_status: true
+          when:
+            - inventory_hostname not in ethernet_switches.stdout
+            - dellswitch_info.stdout | regex_search(ethernet_search_key)
+      rescue:
+        - name: Failed while adding device to ethernet_inventory
+          debug:
+            msg: "{{ ethernet_inventory_fail_msg }}"
+      when:
+        - not idrac_inventory_status
+        - not ethernet_inventory_status
+        - inventory_hostname not in idrac_hosts.stdout
+
+    - name: Add the host to awx ethernet inventory
+      block:
+        - name: Add the host to awx ethernet inventory if not present
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts create --name {{ inventory_hostname }} --inventory ethernet_inventory
+          changed_when: true
+          no_log: true
+      rescue:
+        - name: Failed while adding device to ethernet_inventory
+          debug:
+            msg: "{{ ethernet_inventory_fail_msg }}"
+      when: ethernet_inventory_status
+
+    - name: ib_inventory validation tasks
+      block:
+        - name: Fetch the hosts in infiniband inventory
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts list --inventory infiniband_inventory -f human --filter "name"
+          changed_when: false
+          no_log: true
+          run_once: true
+          register: infiniband_switches
+
+        - name: Authenticate infiniband Switch
+          uri:
+            url: http://{{ inventory_hostname }}/admin/launch?script=rh&template=login&action=login
+            method: POST
+            body_format: form-urlencoded
+            body:
+              f_user_id: "{{ ib_username }}"
+              f_password: "{{ ib_password }}"
+              enter: Sign in
+            status_code: "{{ infiniband_status_code }}"
+          no_log: true
+          register: login
+          when: inventory_hostname not in infiniband_switches.stdout
+
+        - name: Assert infiniband switch
+          uri:
+            url: http://{{ inventory_hostname }}/admin/launch?script=json
+            method: POST
+            body_format: json
+            headers:
+              Cookie: "{{ login.set_cookie.split(';')[0] }}"
+            body:
+              {
+              "commands":
+              [
+                "show version"
+              ]
+              }
+          register: infinibandswitch_info
+          when:
+            - inventory_hostname not in infiniband_switches.stdout
+            - not login.failed
+
+        - name: Set ib_inventory_status
+          set_fact:
+            ib_inventory_status: true
+          when:
+            - inventory_hostname not in infiniband_switches.stdout
+            - not login.failed
+            - infinibandswitch_info.json.data['Product name'] == infiniband_search_key
+      rescue:
+        - name: Failed while adding device to ib_inventory
+          debug:
+            msg: "{{ ib_inventory_fail_msg }}"
+      when:
+        - not idrac_inventory_status
+        - not ethernet_inventory_status
+        - not ib_inventory_status
+        - inventory_hostname not in idrac_hosts.stdout
+
+    - name: Add the host to awx infiniband_inventory
+      block:
+        - name: Add the host to awx infiniband_inventory if not present
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts create --name {{ inventory_hostname }} --inventory infiniband_inventory
+          changed_when: true
+          no_log: true
+      rescue:
+        - name: Failed while adding device to ib_inventory
+          debug:
+            msg: "{{ ib_inventory_fail_msg }}"
+      when: ib_inventory_status
+
+    - name: powervault_me4_inventory validation tasks
+      block:
+        - name: Fetch the hosts in powervault me4 inventory
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts list --inventory powervault_me4_inventory -f human --filter "name"
+          changed_when: false
+          no_log: true
+          run_once: true
+          register: me4_storage
+
+        - name: Get auth string for powervault
+          shell: echo -n {{ powervault_me4_username }}_{{ powervault_me4_password }} | sha256sum
+          changed_when: false
+          register: auth_string
+          no_log: true
+          when: inventory_hostname not in me4_storage.stdout
+
+        - name: Get session key for powervault
+          uri:
+            url: https://{{ inventory_hostname }}/api/login/{{ auth_string.stdout | replace(" -", "") }}
+            method: GET
+            headers:
+              {'datatype': 'json'}
+            validate_certs: false
+          register: session_key
+          when: inventory_hostname not in me4_storage.stdout
+
+        - name: Assert me4_powervault
+          uri:
+            url: https://{{ inventory_hostname }}/api/show/system
+            method: GET
+            body_format: json
+            validate_certs: false
+            use_proxy: false
+            headers:
+              {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
+          register: system_info
+          when: inventory_hostname not in me4_storage.stdout
+
+        - name: Set powervault_me4_status
+          set_fact:
+            powervault_me4_status: true
+          when:
+            - inventory_hostname not in me4_storage.stdout
+            - me4_powervault_search_key in system_info.json.system[0]['scsi-product-id']
+      rescue:
+        - name: Failed while adding device to powervault_me4_inventory
+          debug:
+            msg: "{{ powervault_me4_fail_msg }}"
+      when:
+        - not idrac_inventory_status
+        - not ethernet_inventory_status
+        - not ib_inventory_status
+        - not powervault_me4_status
+        - inventory_hostname not in idrac_hosts.stdout
+
+    - name: Add the host to awx powervault_me4_inventory
+      block:
+        - name: Add the host to awx powervault_me4_inventory if not present
+          command: >-
+            awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+            --conf.insecure hosts create --name {{ inventory_hostname }} --inventory powervault_me4_inventory
+          changed_when: true
+          no_log: true
+      rescue:
+        - name: Failed while adding device to powervault_me4_inventory
+          debug:
+            msg: "{{ powervault_me4_fail_msg }}"
+      when: powervault_me4_status

+ 30 - 13
control_plane/roles/collect_device_info/tasks/main.yml

@@ -1,19 +1,36 @@
 # Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-#      http://www.apache.org/licenses/LICENSE-2.0
+#     http://www.apache.org/licenses/LICENSE-2.0
 #
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This role will not group the devices if the user provides invalid credentials
 ---
 
-# Will be updated later in each PR
-- name: Pass
-  debug:
-    msg: "Pass"
+- name: Check if provisioned host file exists
+  stat:
+    path: "{{ mgmt_provisioned_hosts_file }}"
+  register: provisioned_file
+
+- name: Check the mgmt_provisioned_hosts_file output
+  command: cat {{ mgmt_provisioned_hosts_file }}
+  changed_when: false
+  register: mgmt_hosts
+  when: provisioned_file.stat.exists
+
+- name: Create device_inventory
+  add_host:
+    name: "{{ item }}"
+    groups: "device_inventory"
+  with_items: "{{ mgmt_hosts.stdout_lines }}"
+  when:
+    - provisioned_file.stat.exists
+    - item | trim | length > 1

+ 36 - 0
control_plane/roles/collect_device_info/vars/main.yml

@@ -0,0 +1,36 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+
+# vars file for collect_device_info role
+# This role will not group the devices if the user provides invalid credentials
+
+# Usage main.yml
+mgmt_provisioned_hosts_file: "{{ role_path }}/files/mgmt_provisioned_hosts.yml"
+
+# Usage create_inventory.yml
+infiniband_status_code: 302
+idrac_search_key: "Integrated Dell Remote Access Controller"
+ethernet_search_key: "OS10"
+infiniband_search_key: "MLNX-OS"
+me4_powervault_search_key: "ME4"
+idrac_inventory_fail_msg: "Failed. Unable to add {{ inventory_hostname }} to idrac_inventory"
+ethernet_inventory_fail_msg: "Failed. Unable to add {{ inventory_hostname }} to ethernet_inventory"
+ib_inventory_fail_msg: "Failed. Unable to add {{ inventory_hostname }} to ib_inventory"
+powervault_me4_fail_msg: "Failed. Unable to add {{ inventory_hostname }} to powervault_me4_inventory"
+base_vars_file: "{{ playbook_dir }}/../../../input_params/base_vars.yml"
+login_vars_file: "{{ playbook_dir }}/../../../input_params/login_vars.yml"
+login_vault_file: "{{ playbook_dir }}/../../../input_params/.login_vault_key"
+file_perm: '0644'

+ 21 - 17
control_plane/roles/collect_node_info/files/add_host.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,36 +12,40 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-
-- name: Check if host already exists
-  command: awk "{{ '/'+ item + '/' }}" /root/inventory
-  register: check_host
-  changed_when: no
-
 - name: Initialise host description
   set_fact:
     host_description: "Description Unavailable"
-
+
 - name: Fetch description
   set_fact:
     host_description: "CPU:{{ hostvars[item]['ansible_processor_count'] }}
     Cores:{{ hostvars[item]['ansible_processor_cores'] }}
     Memory:{{ hostvars[item]['ansible_memtotal_mb'] }}MB
     BIOS:{{ hostvars[item]['ansible_bios_version'] }}"
-  when: not check_host.stdout | regex_search(item)
   ignore_errors: yes
 
-- name: Add host
-  lineinfile:
-    path:  "/root/inventory"
-    line: "    {{ item }}:\n      _awx_description: {{ host_description }}"
+- name: Fetch the hosts in awx node inventory
+  command: >-
+    awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+    --conf.insecure hosts list --inventory node_inventory
+  changed_when: false
+  no_log: true
   when:
-    - not check_host.stdout | regex_search(item)
-    - host_description != "Description Unavailable"
+    - host_description != "Description Unavailable"
+  register: hosts
+  ignore_errors: yes
+
+- name: Add the host to awx node inventory if not present
+  command: >-
+    awx --conf.host {{ awx_host }} --conf.username {{ awx_username }} --conf.password {{ awx_password }}
+    --conf.insecure hosts create --name {{ item }} --inventory node_inventory
+  changed_when: true
+  when: item not in hosts.stdout
+  no_log: true
+  ignore_errors: yes
 
 - name: Host added msg
   debug:
     msg: "{{ host_added_msg + item }}"
   when:
-    - not check_host.stdout | regex_search(item)
-    - host_description != "Description Unavailable"
+    - host_description != "Description Unavailable"

+ 99 - 22
control_plane/roles/collect_node_info/files/create_inventory.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-
 - name: Find reachable hosts
   hosts: all
   gather_facts: false
@@ -44,14 +43,29 @@
     - name: Include vars file of inventory role
       include_vars: ../vars/main.yml
 
-- name: Set hostname on reachable nodes and gather facts
+    - name: Check if omnia config file is encrypted
+      command: "cat {{ omnia_config_file }}"
+      changed_when: false
+      register: config_content
+      # no_log: true
+
+    - name: Decrypt omnia_config.yml
+      command: >-
+        ansible-vault decrypt "{{ omnia_config_file }}"
+        --vault-password-file "{{ omnia_config_vault_file }}"
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+
+    - name: Include vars file of inventory role
+      include_vars: "{{ omnia_config_file }}"
+
+- name: Set hostname for reachable nodes and gather facts
   hosts: reachable
   gather_facts: False
   ignore_unreachable: true
-  remote_user: "{{ cobbler_username }}"
+  remote_user: "{{ host_username }}"
   vars:
-    ansible_password: "{{ cobbler_password }}"
-    ansible_become_pass: "{{ cobbler_password }}"
+    ansible_password: "{{ host_password }}"
+    ansible_become_pass: "{{ host_password }}"
     ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
     mapping_file_present: ""
   tasks:
@@ -65,21 +79,21 @@
       changed_when: false
       ignore_errors: true
 
-    - name: Check if IP present in mapping file
-      command: grep "{{ inventory_hostname }}" ../../provision/files/new_mapping_file.csv
+    - name: Check if IP is present in mapping file
+      command: grep "{{ inventory_hostname }}" ../../provision_cobbler/files/new_host_mapping_file.csv
       delegate_to: localhost
       register: file_present
       when: mapping_file | bool == true
       ignore_errors: true
 
-    - name: Set fact if mapping file present
+    - name: Set fact if mapping file is present
       set_fact:
         mapping_file_present: "{{ file_present.stdout }}"
       when: mapping_file | bool == true
       ignore_errors: true
 
     - name: Get the static hostname from mapping file
-      shell: awk -F',' '$3 == "{{ inventory_hostname }}" { print $2 }' ../../provision/files/new_mapping_file.csv
+      shell: awk -F',' '$3 == "{{ inventory_hostname }}" { print $2 }' ../../provision_cobbler/files/new_host_mapping_file.csv
       delegate_to: localhost
       when: ('localhost' in hostname_check.stdout) and (mapping_file_present != "" ) and ( mapping_file | bool == true )
       register: host_name
@@ -87,36 +101,34 @@
 
     - name: Set the hostname from mapping file
       hostname:
-        name: "{{ host_name.stdout }}"
+        name: "{{ host_name.stdout + '.' + hostvars['localhost']['domain_name'] }}"
       when: ('localhost' in hostname_check.stdout) and (mapping_file_present != "" ) and  (mapping_file | bool == true )
       ignore_errors: true
-    
+
     - name: Set the hostname if hostname not present mapping file
       hostname:
-        name: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+        name: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] + '.' + hostvars['localhost']['domain_name'] }}"
       when: ('localhost' in hostname_check.stdout) and (file_present.rc != 0) and (mapping_file | bool == true )
       ignore_errors: true
 
     - name: Set the system hostname
       hostname:
-        name: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+        name: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1]+'.'+ hostvars['localhost']['domain_name'] }}"
       when: ('localhost' in hostname_check.stdout) and (mapping_file | bool == false)
       ignore_errors: true
 
     - name: Add new hostname to /etc/hosts from mapping file
       lineinfile:
         dest: /etc/hosts
-        regexp: '^127\.0\.0\.1[ \t]+localhost'
-        line: "127.0.0.1 localhost {{ host_name.stdout }}"
+        line: "{{ inventory_hostname }} {{ host_name.stdout + '.' + hostvars['localhost']['domain_name'] }}"
         state: present
       when: ('localhost' in hostname_check.stdout) and ( mapping_file_present != "" ) and ( mapping_file | bool == true )
       ignore_errors: true
 
-    - name: Add new hostname to /etc/hosts if hostname not present mapping fil
+    - name: Add new hostname to /etc/hosts if hostname not present mapping file
       lineinfile:
         dest: /etc/hosts
-        regexp: '^127\.0\.0\.1[ \t]+localhost'
-        line: "127.0.0.1 localhost compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+        line: "{{ inventory_hostname }} compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1]+'.'+ hostvars['localhost']['domain_name'] }}"
         state: present
       when: ('localhost' in hostname_check.stdout) and ( file_present.rc != 0 ) and ( mapping_file | bool == true )
       ignore_errors: true
@@ -124,8 +136,7 @@
     - name: Add new hostname to /etc/hosts
       lineinfile:
         dest: /etc/hosts
-        regexp: '^127\.0\.0\.1[ \t]+localhost'
-        line: "127.0.0.1 localhost compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+        line: "{{ inventory_hostname }} compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] +'.'+ hostvars['localhost']['domain_name'] }}"
         state: present
       when: ('localhost' in hostname_check.stdout) and (mapping_file | bool == false )
       ignore_errors: true
@@ -135,6 +146,72 @@
   connection: local
   gather_facts: false
   tasks:
+    - name: Encrypt omnia_config.yml file
+      command: >-
+        ansible-vault encrypt "{{ omnia_config_file }}"
+        --vault-password-file "{{ omnia_config_vault_file }}"
+      changed_when: false
+
+    - name: Update omnia_config.yml permissions
+      file:
+        path: "{{ omnia_config_file }}"
+        mode: "{{ file_perm }}"
+
+    - name: Check if tower_config_file file is encrypted
+      command: cat "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      changed_when: false
+      no_log: true
+      register: tower_config_content
+      run_once: true
+
+    - name: Decrypt tower_config_file
+      command: >-
+        ansible-vault decrypt "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+        --vault-password-file "{{ playbook_dir }}/../../webui_awx/files/.tower_vault_key"
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in tower_config_content.stdout"
+      run_once: true
+
+    - name: Change file permissions
+      file:
+        path: "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+        mode: "{{ file_perm }}"
+
+    - name: Fetch awx host
+      command: grep "host:" "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      register: fetch_awx_host
+      changed_when: false
+      run_once: true
+
+    - name: Fetch awx username
+      command: grep "username:" "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      register: fetch_awx_username
+      changed_when: false
+      run_once: true
+      no_log: true
+
+    - name: Fetch awx password
+      command: grep "password:" "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+      register: fetch_awx_password
+      changed_when: false
+      run_once: true
+      no_log: true
+
+    - name: Set awx variables
+      set_fact:
+        awx_host: "{{ fetch_awx_host.stdout | regex_replace('host: ','') }}"
+        awx_username: "{{ fetch_awx_username.stdout | regex_replace('username: ','') }}"
+        awx_password: "{{ fetch_awx_password.stdout | regex_replace('password: ','') }}"
+      no_log: true
+
+    - name: Encrypt tower_config_file
+      command: >-
+        ansible-vault encrypt "{{ playbook_dir }}/../../webui_awx/files/.tower_cli.cfg"
+        --vault-password-file "{{ playbook_dir }}/../../webui_awx/files/.tower_vault_key"
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in tower_config_content.stdout"
+      run_once: true
+
     - name: Update inventory file
       block:
         - name: Fetch facts and add new hosts
@@ -145,4 +222,4 @@
     - name: Show unreachable hosts
       debug:
         msg: "{{ host_unreachable_msg }} + {{ groups['ungrouped'] }}"
-      when: "'ungrouped' in groups"
+      when: "'ungrouped' in groups"

+ 42 - 52
control_plane/roles/collect_node_info/tasks/main.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,85 +16,75 @@
   set_fact:
     ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
 
-- name: Check if provisioned host file exists
-  stat:
-    path: "{{ role_path }}/files/provisioned_hosts.yml"
-  register: provisioned_file_result
+- name: Fetch the execution environment
+  command: hostname
+  register: host_name
+  changed_when: false
 
-- name: Include vars file of control_plane_common role
-  include_vars: "{{ role_path }}/../control_plane_common/vars/main.yml"
-  no_log: True
+- name: Install sshpass
+  package:
+    name: sshpass
+    state: present
+  when: awx_search_key not in host_name.stdout
 
-- name: Include vars file of webui_awx role
-  include_vars: "{{ role_path }}/../webui_awx/vars/main.yml"
-  no_log: True
+- name: Check if provisioned host file exists
+  stat:
+    path: "{{ provisioned_hosts_file }}"
+  register: provisioned_file
 
-- name: Update inventory file
+- name: Include variable file base_vars.yml
+  include_vars: "{{ base_vars_file }}"
+  
+- name: Update inventory
   block:
-    - name: Check if input config file is encrypted
-      command: cat {{ input_config_filename }}
+    - name: Check if {{ login_vars_file }} file is encrypted
+      command: cat {{ login_vars_file }}
       changed_when: false
+      no_log: true
       register: config_content
+      run_once: true
 
-    - name: Decrpyt appliance_config.yml
+    - name: Decrypt {{ login_vars_file }}
       command: >-
-        ansible-vault decrypt {{ input_config_filename }}
-        --vault-password-file {{ vault_filename }}
+        ansible-vault decrypt {{ login_vars_file }}
+        --vault-password-file {{ login_vault_file }}
       when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      changed_when: false
+      run_once: true
 
-    - name: Include variable file appliance_config.yml
-      include_vars: "{{ input_config_filename }}"
-      no_log: True
-
+    - name: Include variable file {{ login_vars_file }}
+      include_vars: "{{ login_vars_file }}"
+      no_log: true
+      run_once: true
+    
     - name: Save input variables from file
       set_fact:
-        cobbler_password: "{{ provision_password }}"
         mapping_file: false
-        path_mapping_file: "{{ mapping_file_path }}"
-      no_log: True
 
     - name: Check the status for mapping file
       set_fact:
         mapping_file: true
-      when: path_mapping_file != ""
+      when: host_mapping_file_path
 
-    - name: Encrypt input config file
+    - name: Encrypt {{ login_vars_file }}
       command: >-
-        ansible-vault encrypt {{ input_config_filename }}
-        --vault-password-file {{ vault_filename }}
+        ansible-vault encrypt {{ login_vars_file }}
+        --vault-password-file {{ login_vault_file }}
       changed_when: false
-
-    - name: Check if inventory file already exists
-      file:
-        path: "/root/inventory"
-        state: absent
-
-    - name: Create empty inventory file
-      copy:
-        dest:  "/root/inventory"
-        content: |
-          ---
-          all:
-            hosts:
-        owner: root
-        mode: 0775
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      run_once: true
 
     - name: Add inventory playbook
       block:
         - name: add hosts with description to inventory file
           command: >-
-            ansible-playbook -i {{ role_path }}/files/provisioned_hosts.yml
+            ansible-playbook -i {{ provisioned_hosts_file }}
             {{ role_path }}/files/create_inventory.yml
-            --extra-vars "cobbler_username={{ cobbler_username }} cobbler_password={{ cobbler_password }} mapping_file={{ mapping_file | bool }}"
+            --extra-vars "host_username={{ host_username }} host_password={{ provision_password }} mapping_file={{ mapping_file | bool }}"
           no_log: True
           register: register_error
       rescue:
         - name: Fail if host addition was not successful
           fail:
-            msg: "{{ register_error.stderr + register_error.stdout | regex_replace(cobbler_username) | regex_replace(cobbler_password) }}"
-
-  when: provisioned_file_result.stat.exists
-
-- name: push inventory to AWX
-  command: awx-manage inventory_import --inventory-name {{ omnia_inventory_name }} --source /root/inventory
-  when: provisioned_file_result.stat.exists
+            msg: "{{ register_error.stderr + register_error.stdout | regex_replace(host_username) | regex_replace(provision_password) }}"
+  when: provisioned_file.stat.exists

+ 12 - 3
control_plane/roles/collect_node_info/vars/main.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,5 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-host_added_msg: "Added host to inventory: "
-host_unreachable_msg: "Following hosts are unreachable: "
+provisioned_hosts_file: "{{ role_path }}/files/provisioned_hosts.yml"
+base_vars_file: "{{ role_path }}/../../input_params/base_vars.yml"
+login_vars_file: "{{ role_path }}/../../input_params/login_vars.yml"
+login_vault_file: "{{ role_path }}/../../input_params/.login_vault_key"
+omnia_config_file: "../../../../omnia_config.yml"
+omnia_config_vault_file: "../../../../.omnia_vault_key"
+file_perm: '0644'
+host_username: root
+host_added_msg: "Added host to awx inventory: "
+host_unreachable_msg: "Following hosts are unreachable: "
+awx_search_key: awx

+ 150 - 16
control_plane/test/test_powervault.yml

@@ -204,6 +204,124 @@
         fail_msg: "{{ user_validation_failure_msg }}"
       tags: VERIFY_OMNIA_01
 
+# Testcase OMNIA_1.1_PV_TC_018
+# Test case to validate linear storage with linear pools and disk groups
+
+- name: OMNIA_1.1_PV_TC_018
+  hosts: powervault
+  connection: local
+  tags: TC_018
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_powervault_vars.yml
+    - ../input_params/powervault_me4_vars.yml
+    - ../input_params/base_vars.yml
+    - ../roles/webui_awx/vars/main.yml
+  tasks:
+
+    - name: Set powervault parameters
+      lineinfile:
+        path: "{{ powervault_me4_var_path }}"
+        regexp: "{{ item.regexp }}"
+        line: "{{ item.line }}"
+      loop:
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ linear_pool_type }}"' }
+        - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia }}"' }
+        - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
+        - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_1_2 }}"' }
+        - { regexp: '^powervault_me4_k8s_volume_name', line: 'powervault_me4_k8s_volume_name: "{{ volume_name_k8s_v1 }}"' }
+        - { regexp: '^powervault_me4_slurm_volume_name', line: 'powervault_me4_slurm_volume_name: "{{ volume_name_slurm_v1 }}"' }
+        - { regexp: '^powervault_me4_volume_size', line: 'powervault_me4_volume_size: "{{ volume_size_input_100gb }}"' }
+
+    - name: Execute powervault playbook using AWX collections
+      vars:
+        inventory_name: "{{ powervault_inventory_name }}"
+        template_name: "{{ template_value }}"
+        job_template_name: "{{ job_name }}"
+        playbook_path: "{{ powervault_playbook_path }}"
+        delete_status: false
+      include_tasks: "{{ awx_script_path }}"  
+
+    - name: Validate powervault playbook execution output
+      assert:
+        that:
+          - success_job_status in job_status.status
+        success_msg: "{{ playbook_exec_success_msg }}"
+        fail_msg: "{{ playbook_exec_fail_msg }}"
+      changed_when: false
+
+    - name: Execute show disk groups command
+      uri:
+        url: https://{{ inventory_hostname }}/api/show/disk-groups/pool/{{ disk_group_name_omnia }}/{{ disk_group_name_omnia }}
+        method: GET
+        body_format: json
+        validate_certs: no
+        use_proxy: no
+        headers:
+          {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
+      register: pools_diskgroup_info
+      tags: VERIFY_OMNIA_01
+
+    - name: Verify the pool and disk group additions
+      assert:
+        that:
+          - linear_pool_type in pools_diskgroup_info.json['disk-groups'][0]['storage-type']
+          - disk_group_name_omnia in pools_diskgroup_info.json['disk-groups'][0].name
+          - disk_group_name_omnia in pools_diskgroup_info.json['disk-groups'][0].pool
+          - disk_type_ssdsas in pools_diskgroup_info.json['disk-groups'][0]['disk-description']
+          - raid1_level in pools_diskgroup_info.json['disk-groups'][0].raidtype
+        success_msg: "{{ pool_diskgroup_config_success_msg }}"
+        fail_msg: "{{ pool_diskgroup_config_failure_msg }}"
+      tags: VERIFY_OMNIA_01
+
+    - name: Execute show volumes command for k8s
+      uri:
+        url: https://{{ inventory_hostname }}/api/show/volumes/{{ volume_name_k8s_v1 }}
+        method: GET
+        body_format: json
+        validate_certs: no
+        use_proxy: no
+        headers:
+          {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
+      register: k8s_volumes_info
+      tags: VERIFY_OMNIA_01
+
+    - name: Execute show volumes command for slurm
+      uri:
+        url: https://{{ inventory_hostname }}/api/show/volumes/{{ volume_name_slurm_v1 }}
+        method: GET
+        body_format: json
+        validate_certs: no
+        use_proxy: no
+        headers:
+          {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
+      register: slurm_volumes_info
+      tags: VERIFY_OMNIA_01
+
+    - name: Verify the volumes creation
+      assert:
+        that:
+          - volume_name_k8s_v1 in k8s_volumes_info.json.volumes[0]['volume-name']
+          - volume_size_100gb in k8s_volumes_info.json.volumes[0].size
+          - volume_name_slurm_v1 in slurm_volumes_info.json.volumes[0]['volume-name']
+          - volume_size_100gb in slurm_volumes_info.json.volumes[0].size
+          - disk_group_name_omnia in k8s_volumes_info.json.volumes[0]['storage-pool-name']
+        success_msg: "{{ volumes_creation_success_msg }}"
+        fail_msg: "{{ volumes_creation_failure_msg }}"
+      tags: VERIFY_OMNIA_01
+   
+    - name: Delete the disk group created
+      uri:
+        url: https://{{ inventory_hostname }}/api/remove/disk-groups/{{ disk_group_name_omnia }}
+        method: GET
+        body_format: json
+        validate_certs: no
+        use_proxy: no
+        headers:
+          {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
+      register: delete_diskgroup
+
 # Testcase OMNIA_1.1_PV_TC_005
 # Test case to validate configuration of pools, disk group and storage volumes with SAS MDL disks, RAID 1 level, storage with rw access 
 
@@ -227,6 +345,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA01 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_5_6 }}"' }
@@ -240,7 +359,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -251,6 +370,15 @@
         fail_msg: "{{ playbook_exec_fail_msg }}"
       changed_when: false
 
+    - name: Get session key
+      uri:
+        url: https://{{ inventory_hostname }}/api/login/{{ auth_string.stdout | replace(" -", "") }}
+        method: GET
+        headers:
+          {'datatype': 'json'}
+        validate_certs: no
+      register: session_key
+
     - name: Execute show disk groups command
       uri:
         url: https://{{ inventory_hostname }}/api/show/disk-groups/pool/{{ pool_a }}/{{ disk_group_name_omnia_dgA01 }}
@@ -261,7 +389,6 @@
         headers:
           {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
       register: pools_diskgroup_info
-      tags: VERIFY_OMNIA_01
 
     - name: Verify the pool and disk group additions
       assert:
@@ -273,7 +400,6 @@
           - raid1_level in pools_diskgroup_info.json['disk-groups'][0].raidtype
         success_msg: "{{ pool_diskgroup_config_success_msg }}"
         fail_msg: "{{ pool_diskgroup_config_failure_msg }}"
-      tags: VERIFY_OMNIA_01
 
     - name: Execute show volumes command
       uri:
@@ -285,7 +411,6 @@
         headers:
           {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
       register: k8s_volumes_info
-      tags: VERIFY_OMNIA_01
 
     - name: Execute show volumes command
       uri:
@@ -297,7 +422,6 @@
         headers:
           {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
       register: slurm_volumes_info
-      tags: VERIFY_OMNIA_01
 
     - name: Verify the volumes creation
       assert:
@@ -309,7 +433,6 @@
           - pool_a in k8s_volumes_info.json.volumes[0]['storage-pool-name']
         success_msg: "{{ volumes_creation_success_msg }}"
         fail_msg: "{{ volumes_creation_failure_msg }}"
-      tags: VERIFY_OMNIA_01
 
 # Testcase OMNIA_1.1_PV_TC_004
 # Test case to validate the SNMP trap notification when level is set to none
@@ -368,6 +491,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA02 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_3_4 }}"' }
@@ -381,7 +505,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -415,6 +539,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA02 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_5_6 }}"' }
@@ -428,7 +553,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -462,6 +587,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA01 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_2_3 }}"' }
@@ -475,7 +601,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -509,6 +635,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA02 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_2_3 }}"' }
@@ -522,7 +649,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -587,6 +714,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA02 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_2_3 }}"' }
@@ -600,7 +728,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -665,6 +793,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA02 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid5_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_0_2 }}"' }
@@ -678,7 +807,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -765,6 +894,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_b }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgB01 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid6_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_7_10 }}"' }
@@ -778,7 +908,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -865,6 +995,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_b }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgB01 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid10_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_8_11 }}"' }
@@ -878,7 +1009,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -965,6 +1096,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_b }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgB01 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid_adapt_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_0_11 }}"' }
@@ -978,7 +1110,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -1036,6 +1168,7 @@
         line: "{{ item.line }}"
       loop:
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA02 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_2_3 }}"' }
@@ -1049,7 +1182,7 @@
         template_name: "{{ template_value }}"
         job_template_name: "{{ job_name }}"
         playbook_path: "{{ powervault_playbook_path }}"
-        delete_status: true
+        delete_status: false
       include_tasks: "{{ awx_script_path }}" 
 
     - name: Validate powervault playbook execution output
@@ -1145,6 +1278,7 @@
       loop:
         - { regexp: '^powervault_me4_snmp_notify_level', line: 'powervault_me4_snmp_notify_level: "{{ snmp_notify_level_crit }}"' }
         - { regexp: '^powervault_me4_pool', line: 'powervault_me4_pool: "{{ pool_a }}"' }
+        - { regexp: '^powervault_me4_pool_type', line: 'powervault_me4_pool_type: "{{ pool_type }}"' }
         - { regexp: '^powervault_me4_disk_group_name', line: 'powervault_me4_disk_group_name: "{{ disk_group_name_omnia_dgA02 }}"' }
         - { regexp: '^powervault_me4_raid_levels', line: 'powervault_me4_raid_levels: "{{ raid1_level }}"' }
         - { regexp: '^powervault_me4_disk_range', line: 'powervault_me4_disk_range: "{{ disk_range_2_3 }}"' }

+ 4 - 1
control_plane/test/test_vars/test_powervault_vars.yml

@@ -36,6 +36,7 @@ user_locale: "English"
 pool_a: "A"
 pool_b: "B"
 pool_type: "Virtual"
+linear_pool_type: "Linear"
 raid1_level: "RAID1"
 raid5_level: "RAID5"
 raid6_level: "RAID6"
@@ -43,6 +44,7 @@ raid10_level: "RAID10"
 raid_adapt_level: "ADAPT"
 disk_type_ssdsas: "SSD SAS"
 disk_type_sasmdl: "SAS MDL"
+disk_group_name_omnia: "omnia"
 disk_group_name_omnia_dgA01: "omnia_dgA01"
 disk_group_name_omnia_dgA02: "omnia_dgA02"
 disk_group_name_omnia_dgB01: "omnia_dgB01"
@@ -52,6 +54,7 @@ volume_name_k8s_v2: "k8s_V2"
 volume_name_slurm_v2: "slurm_V2"
 volume_size_input_100gb: "100GB"
 volume_size_100gb: "99.9GB"
+disk_range_1_2: "0.1-2"
 disk_range_5_6: "0.5-6"
 disk_range_3_4: "0.3-4"
 disk_range_7_10: "0.7-10"
@@ -72,7 +75,7 @@ login_vars_path: "../input_params/login_vars.yml"
 login_vars_vault_path: "../input_params/.login_vault_key"
 powervault_me4_var_path: "../input_params/powervault_me4_vars.yml"
 base_var_path: "../input_params/base_vars.yml"
-powervault_inventory_name: "powervault_me4_inventory"
+powervault_inventory_name: "test_powervault_me4_inventory"
 template_value: "powervault_me4_template"
 job_name: "test_powervault_me4_template"
 powervault_playbook_path: "control_plane/powervault_me4.yml"

+ 64 - 14
omnia.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,10 +21,10 @@
     - cluster_validation
 
 - name: Gather facts from all the nodes
-  hosts: all
+  hosts: manager, compute, login_node, nfs_node
 
 - name: Apply common installation and config
-  hosts: manager, compute
+  hosts: manager, compute, login_node
   gather_facts: false
   roles:
     - common
@@ -72,23 +72,73 @@
     - k8s_firewalld
   tags: kubernetes
 
+- name: Powervault Server Configuration
+  hosts: nfs_node
+  gather_facts: false
+  tasks:
+    - name: Configuring NFS node
+      include_role:
+        name: powervault_me4_nfs
+      when: hostvars['127.0.0.1']['powervault_status']
+
+- name: Map volume
+  hosts: powervault_me4
+  connection: local
+  gather_facts: false
+  tasks:
+    - name: Include map volume task
+      include_tasks: "{{ playbook_dir }}/control_plane/roles/powervault_me4/tasks/ports.yml"
+      when: hostvars['127.0.0.1']['powervault_status']
+
+    - name: Include map volume task
+      include_tasks: "{{ playbook_dir }}/control_plane/roles/powervault_me4/tasks/map_volume.yml"
+      when: hostvars['127.0.0.1']['powervault_status']
+
+- name: Apply NFS server setup on NFS node
+  hosts: nfs_node
+  gather_facts: false
+  tasks:
+    - name: Check the mapped volume on server
+      include_role:
+        name: powervault_me4_nfs
+        tasks_from: nfs_volume
+      when: hostvars['127.0.0.1']['powervault_status']
+
+    - name: Mount partitions
+      include_role:
+         name: powervault_me4_nfs
+         tasks_from: mount_me4_partitions
+      when: hostvars['127.0.0.1']['powervault_status']
+
+    - name: Setup NFS server on the partitions
+      include_role:
+         name: powervault_me4_nfs
+         tasks_from: me4_nfs_server_setup
+      when: hostvars['127.0.0.1']['powervault_status']
+
 - name: Apply NFS server setup on manager node
   hosts: manager
   gather_facts: false
-  roles:
-    - k8s_nfs_server_setup
-  tags:
-    - kubernetes
-    - nfs
+  tasks:
+    - name: Apply NFS server setup on manager node
+      include_role:
+        name: k8s_nfs_server_setup
+      when: not hostvars['127.0.0.1']['powervault_status']
+      tags:
+        - kubernetes
+        - nfs
 
 - name: Apply NFS client setup on compute nodes
   hosts: compute
   gather_facts: false
-  roles:
-    - k8s_nfs_client_setup
-  tags:
-    - kubernetes
-    - nfs
+  tasks:
+    - name: Apply NFS client setup on compute nodes
+      include_role:
+        name: k8s_nfs_client_setup
+      when: not hostvars['127.0.0.1']['powervault_status']
+      tags:
+        - kubernetes
+        - nfs
 
 - name: Start K8s on manager server
   hosts: manager
@@ -156,4 +206,4 @@
 
 - name: Passwordless SSH between manager and compute nodes
   include: control_plane/tools/passwordless_ssh.yml
-  when: hostvars['127.0.0.1']['appliance_status']
+  when: hostvars['127.0.0.1']['control_plane_status']

+ 4 - 2
roles/k8s_nfs_client_setup/tasks/main.yml

@@ -21,12 +21,14 @@
   tags: nfs_client
 
 - name: Check mounted share
-  shell: mount | grep nfs
+  shell: >
+    set -o pipefail && \
+    mount | grep nfs
   changed_when: false
   args:
     warn: false
   register: mounted_share
-  ignore_errors: True
+  failed_when: false
   tags: nfs_client
 
 - name: Creating directory to mount NFS Share

+ 1 - 1
roles/k8s_nfs_client_setup/vars/main.yml

@@ -19,6 +19,6 @@ nfs_share_dir: /home/k8snfs
 
 mounthost: "{{ groups['manager'][0] }}"
 
-nfs_mnt_dir_mode: 0755
+nfs_mnt_dir_mode: 0777
 
 fstab_file_path: /etc/fstab

+ 14 - 4
roles/k8s_start_services/tasks/deploy_k8s_services.yml

@@ -108,10 +108,20 @@
   changed_when: true
   tags: init
 
-- name: Start NFS Client Provisioner
-  command: "helm install stable/nfs-client-provisioner --set nfs.server='{{ nfs_server }}' --set nfs.path='{{ nfs_path }}' --generate-name"
+- name: Start NFS Client Provisioner using NFS on manager node
+  command: "helm install stable/nfs-client-provisioner --set nfs.server='{{ nfs_server_manager_node }}' --set nfs.path='{{ nfs_share_dir }}' --generate-name"
   changed_when: true
-  when: "'nfs-client-provisioner' not in k8s_pods.stdout"
+  when:
+    - "'nfs-client-provisioner' not in k8s_pods.stdout"
+    - not hostvars['127.0.0.1']['powervault_status']
+  tags: init
+
+- name: Start NFS Client Provisioner using NFS on NFS Node
+  command: "helm install stable/nfs-client-provisioner --set nfs.server='{{ nfs_server_nfs_node }}' --set nfs.path='{{ me4_nfs_share_k8s }}' --generate-name"
+  changed_when: true
+  when:
+    - "'nfs-client-provisioner' not in k8s_pods.stdout"
+    - hostvars['127.0.0.1']['powervault_status']
   tags: init
 
 - name: Set NFS-Client Provisioner as DEFAULT StorageClass
@@ -217,4 +227,4 @@
   command: helm install my-release spark-operator/spark-operator --set image.tag={{ operator_image_tag }} --namespace spark-operator --create-namespace
   changed_when: true
   when: "'spark-operator' not in k8s_pods.stdout"
-  tags: init
+  tags: init

+ 3 - 3
roles/k8s_start_services/vars/main.yml

@@ -61,9 +61,9 @@ k8s_dashboard_yaml_url: https://raw.githubusercontent.com/kubernetes/dashboard/v
 
 helm_stable_repo_url: https://charts.helm.sh/stable
 
-nfs_server: "{{ ansible_host }}"
+nfs_server_manager_node: "{{ ansible_host }}"
 
-nfs_path: /home/k8snfs
+nfs_server_nfs_node: "{{ groups['nfs_node'][0] }}"
 
 mpi_operator_yaml_url: https://raw.githubusercontent.com/kubeflow/mpi-operator/master/deploy/v1alpha2/mpi-operator.yaml
 
@@ -93,4 +93,4 @@ spark_operator_repo: https://googlecloudplatform.github.io/spark-on-k8s-operator
 
 operator_image_tag: v1beta2-1.2.3-3.1.1
 
-volcano_scheduling_yaml_url: https://raw.githubusercontent.com/volcano-sh/volcano/v1.3.0/installer/volcano-development.yaml
+volcano_scheduling_yaml_url: https://raw.githubusercontent.com/volcano-sh/volcano/v1.3.0/installer/volcano-development.yaml

+ 21 - 0
roles/powervault_me4_nfs/tasks/main.yml

@@ -0,0 +1,21 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Include variable file for powervault
+  include_vars: "{{ pv_nfs_file }}"
+  no_log: true
+
+- name: Configure the server
+  include_tasks: nfs_node_configure.yml

+ 79 - 0
roles/powervault_me4_nfs/tasks/me4_nfs_server_setup.yml

@@ -0,0 +1,79 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Install nfs-utils
+  package:
+    name: nfs-utils
+    state: present
+
+- name: Install firewalld
+  package:
+    name: firewalld
+    state: present
+
+- name: Start and enable firewalld
+  service:
+    name: firewalld
+    state: started
+    enabled: true
+
+- name: Start and enable rpcbind and nfs-server service
+  service:
+    name: "{{ item }}"
+    state: restarted
+    enabled: true
+  with_items:
+    - rpcbind
+    - nfs-server
+
+- name: Adding K8s NFS share entries in /etc/exports
+  lineinfile:
+    path: "{{ exports_file_path }}"
+    line: "{{ me4_nfs_share_k8s }} {{ item }}(rw,sync,no_root_squash)"
+  with_items:
+    - "{{ groups['manager'] }}"
+    - "{{ groups['compute'] }}"
+
+# NOTE(review): an exact duplicate of the "Adding K8s NFS share entries" task above
+# appeared here and was removed as redundant — lineinfile is idempotent, but the
+# repetition was clearly accidental.
+
+- name: Adding Slurm NFS share entries in /etc/exports
+  lineinfile:
+    path: "{{ exports_file_path }}"
+    line: "{{ me4_nfs_share_slurm }} {{ item }}(rw,sync,no_root_squash)"
+  with_items:
+    - "{{ groups['manager'] }}"
+    - "{{ groups['compute'] }}"
+
+- name: Exporting the shared directories
+  command: exportfs -ra
+  changed_when: true
+
+- name: Configuring firewall
+  firewalld:
+    service: "{{ item }}"
+    permanent: true
+    state: enabled
+  with_items:
+    - "{{ nfs_services }}"
+
+- name: Reload firewalld
+  command: firewall-cmd --reload
+  changed_when: true

+ 111 - 0
roles/powervault_me4_nfs/tasks/mount_me4_partitions.yml

@@ -0,0 +1,111 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Get ME4 volume
+  shell: >
+    set -o pipefail && \
+    lsscsi -s | grep ME4
+  changed_when: false
+  register: me4_output
+  failed_when: false
+
+- name: ME4 volume check
+  fail:
+    msg: "{{ me4_volume_fail_msg }}"
+  when: me4_output is failed or (me4_output.stdout | regex_findall('ME4') | length) != 2
+
+- name: Set ME4 data facts
+  set_fact:
+    me4_k8s_volume_data: "{{ me4_output.stdout.split('\n')[0].split(' ') | select() }}"
+    me4_slurm_volume_data: "{{ me4_output.stdout.split('\n')[1].split(' ') | select() }}"
+
+- name: Add ME4 volume data to dummy host
+  add_host:
+    name:   "NFS_NODE_TOKEN_HOLDER"
+    me4_k8s_volume: "{{ me4_k8s_volume_data[-2] }}"
+    me4_slurm_volume: "{{ me4_slurm_volume_data[-2] }}"
+
+- name: Get all mounted partitions
+  command: df -h
+  changed_when: false
+  register: mounted_partitions
+
+# NOTE(review): the two "not in mounted_partitions" conditions below apply jointly,
+# so if either volume is already mounted, neither gets partitioned (the mkfs step
+# later has the same pattern) — confirm this is intended.
+- name: Create partition on ME4 volumes
+  command: "parted -a optimal {{ item }} --script -- mklabel gpt mkpart primary 0% {{ powervault_me4_disk_partition_size }}"
+  changed_when: true
+  with_items:
+    - "{{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_k8s_volume'] }}"
+    - "{{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_slurm_volume'] }}"
+  when:
+    - hostvars['NFS_NODE_TOKEN_HOLDER']['me4_k8s_volume'] not in mounted_partitions.stdout
+    - hostvars['NFS_NODE_TOKEN_HOLDER']['me4_slurm_volume'] not in mounted_partitions.stdout
+
+- name: Update kernel with new partition changes
+  command: partprobe
+  changed_when: false
+
+- name: Check ME4 mounted partitions
+  shell: >
+    set -o pipefail && \
+    mount | grep me4
+  failed_when: false
+  changed_when: false
+  args:
+    warn: false
+  register: me4_mounted_partitions
+
+- name: Set file system on partition
+  shell: >
+    set -o pipefail && \
+    echo y | mkfs -t ext4 {{ item }}1
+  with_items:
+    - "{{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_k8s_volume'] }}"
+    - "{{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_slurm_volume'] }}"
+  when:
+    - me4_nfs_share_k8s not in me4_mounted_partitions.stdout
+    - me4_nfs_share_slurm not in me4_mounted_partitions.stdout
+
+- name: Creating NFS share directories
+  file:
+    path: "{{ item }}"
+    state: directory
+    mode: "{{ nfs_share_dir_mode }}"
+  with_items:
+    - "{{ me4_nfs_share_k8s }}"
+    - "{{ me4_nfs_share_slurm }}"
+
+- name: Mount K8s partition on K8s NFS share
+  command: "mount {{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_k8s_volume'] }}1 {{ me4_nfs_share_k8s }}"
+  changed_when: true
+  args:
+    warn: false
+  when: me4_nfs_share_k8s not in me4_mounted_partitions.stdout
+
+- name: Mount Slurm partition on Slurm NFS share
+  command: "mount {{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_slurm_volume'] }}1 {{ me4_nfs_share_slurm }}"
+  changed_when: true
+  args:
+    warn: false
+  when: me4_nfs_share_slurm not in me4_mounted_partitions.stdout
+
+- name: Configuring auto mount K8s partition on reboot
+  lineinfile:
+    path: "{{ fstab_file_path }}"
+    line: "{{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_k8s_volume'] }}1            {{ me4_nfs_share_k8s }}      ext4            defaults        0     0"
+
+- name: Configuring auto mount Slurm partition on reboot
+  lineinfile:
+    path: "{{ fstab_file_path }}"
+    line: "{{ hostvars['NFS_NODE_TOKEN_HOLDER']['me4_slurm_volume'] }}1            {{ me4_nfs_share_slurm }}      ext4            defaults        0     0"

+ 137 - 0
roles/powervault_me4_nfs/tasks/nfs_node_configure.yml

@@ -0,0 +1,137 @@
+
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Include powervault_me4 ports configuration tasks
+  include_tasks: "{{ playbook_dir }}/control_plane/roles/powervault_me4/tasks/ports.yml"
+
+- name: Refresh ssh keys
+  command: ssh-keygen -R {{ groups['powervault_me4'][0] }}
+  changed_when: false
+  tags: install
+  failed_when: false
+
+- name: Validate authentication of username and password
+  command: ping -c1 {{ groups['powervault_me4'][0] }}
+  register: validate_login
+  changed_when: false
+  failed_when: false
+
+- name: Install packages
+  package:
+    name: iscsi-initiator-utils
+    state: present
+  tags: install
+
+- name: Install packages
+  package:
+    name: sg3_utils
+    state: present
+  tags: install
+
+- name: Set bootproto value
+  lineinfile:
+    path: "{{ nic_path }}"
+    regexp: '^BOOTPROTO='
+    line: 'BOOTPROTO=none'
+  register: result
+
+- name: Set onboot value
+  lineinfile:
+    path: "{{ nic_path }}"
+    regexp: '^ONBOOT='
+    line: 'ONBOOT=yes'
+
+- name: Add ip address
+  lineinfile:
+    path: "{{ nic_path }}"
+    insertafter: '^ONBOOT=yes'
+    line: 'IPADDR={{ pv_nic_ip }}'
+
+- name: Add netmask address
+  lineinfile:
+    path: "{{ nic_path }}"
+    insertafter: '^IPADDR={{ pv_nic_ip }}'
+    line: NETMASK=255.255.255.0
+
+- name: Down the nic
+  command: ifdown {{ pv_nic }}
+  changed_when: true
+  failed_when: false
+  tags: install
+
+- name: Up the nic
+  command: ifup {{ pv_nic }}
+  changed_when: true
+  tags: install
+
+- name: Show ip
+  shell: >
+    set -o pipefail && \
+    ifconfig {{ pv_nic }} | grep 'inet' |cut -d: -f2 |  awk '{ print $2}'
+  changed_when: false
+
+- name: Discover nodes
+  command: iscsiadm -m discovery -t sendtargets -p {{ item }}
+  with_items: "{{ set_port_ip }}"
+  register: ports_available
+  failed_when: false
+  changed_when: false
+  tags: install
+
+- name: Pv port ip
+  add_host:
+    name: pv
+    map_ip: "{{ item.item }}"
+  with_items: "{{ ports_available.results }}"
+  when: item.rc == 0
+
+- name: Pv port ip
+  set_fact:
+    map_ip_output: "{{ item.stdout_lines }}"
+  with_items: "{{ ports_available.results }}"
+  when: item.rc == 0
+
+- name: Find feasible port ip
+  set_fact:
+    discover: "{{ item }}"
+  with_items: "{{ map_ip_output }}"
+  when: hostvars['pv']['map_ip'] in item
+
+- name: Split on comma
+  set_fact:
+    ip_port: "{{ discover.split(',')[0] }}"
+
+- name: Pv name
+  set_fact:
+    pv_name: "{{ discover.split(',')[1].split()[1] }}"
+
+- name: Get iSCSI initiator name (IQN)
+  shell: >
+    set -o pipefail && \
+    cat /etc/iscsi/initiatorname.iscsi | cut -f2 -d"="
+  register: iqdn_id
+  changed_when: false
+  tags: install
+
+- name: Add ME4 volume data to dummy host
+  add_host:
+    name:   "server_iqdn_id"
+    server_iqdn: "{{ iqdn_id.stdout }}"
+
+- name: Login to the powervault
+  command: iscsiadm -m node --login {{ pv_name }} -p {{ ip_port }}
+  changed_when: true
+  tags: install

+ 40 - 0
roles/powervault_me4_nfs/tasks/nfs_volume.yml

@@ -0,0 +1,40 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Refresh ssh keys
+  command: ssh-keygen -R {{ groups['powervault_me4'][0] }}
+  changed_when: false
+  tags: install
+  failed_when: false
+
+- name: Validate authentication of username and password
+  command: ping -c1 {{ groups['powervault_me4'][0] }}
+  register: validate_login
+  changed_when: false
+  failed_when: false
+
+- name: Scan for getting the volume
+  command: rescan-scsi-bus.sh --forcerescan
+  changed_when: false
+  register: volume_pv
+  tags: install
+
+- name: Assert if volume created or not
+  assert:
+    that:
+      - "' Model: ME4' in volume_pv.stdout"
+    success_msg: "Volume is created"
+    fail_msg: "Volume is not created properly."
+  tags: install

+ 38 - 0
roles/powervault_me4_nfs/vars/main.yml

@@ -0,0 +1,38 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# vars file for powervault_me4_nfs
+
+# Usage: mount_me4_partitions.yml
+me4_volume_fail_msg: "ME4 volumes not found!"
+me4_nfs_share_k8s: "/me4_k8s_nfs"
+me4_nfs_share_slurm: "/me4_slurm_nfs"
+fstab_file_path: "/etc/fstab"
+
+# Usage: me4_nfs_server_setup.yml
+exports_file_path: /etc/exports
+nfs_services:
+  - mountd
+  - rpc-bind
+  - nfs
+nfs_share_dir_mode: "0777"
+
+# Usage: nfs_node_configure.yml
+pv_nic: "{{ powervault_me4_server_nic }}"
+pv_nic_ip: 192.168.25.3
+pv_nic_gateway: 192.168.25.1
+pv_port_ip: 192.168.25.5
+pv_nfs_file: "{{ role_path }}/../../control_plane/input_params/powervault_me4_vars.yml"
+nic_path: "/etc/sysconfig/network-scripts/ifcfg-{{ powervault_me4_server_nic }}"