
Merge branch 'devel' into component_role

Lucas A. Wilson · 3 years ago · commit d7dbe7fb7b
27 changed files with 2395 additions and 36 deletions
  1. .github/workflows/ansible-lint.yml (+3 -3)
  2. control_plane/input_params/powervault_me4_vars.yml (+16 -8)
  3. control_plane/roles/control_plane_common/tasks/count_component_roles.yml (+37 -0)
  4. control_plane/roles/control_plane_common/tasks/main.yml (+9 -1)
  5. control_plane/roles/control_plane_common/tasks/validate_device_mapping_file.yml (+82 -0)
  6. control_plane/roles/control_plane_common/tasks/validate_host_mapping_file.yml (+179 -0)
  7. control_plane/roles/control_plane_common/vars/main.yml (+30 -0)
  8. control_plane/roles/powervault_me4/tasks/map_volume.yml (+14 -6)
  9. control_plane/roles/powervault_me4/tasks/ports.yml (+5 -0)
  10. control_plane/roles/powervault_me4/tasks/pv_me4_prereq.yml (+4 -4)
  11. control_plane/roles/powervault_me4/tasks/pv_validation.yml (+15 -3)
  12. control_plane/roles/powervault_me4/tasks/volume.yml (+76 -7)
  13. control_plane/roles/powervault_me4/vars/main.yml (+1 -2)
  14. control_plane/test/test_control_plane.yml (+709 -0)
  15. control_plane/test/test_control_plane_validation.yml (+271 -0)
  16. control_plane/test/test_eth_mtu.yml (+47 -0)
  17. control_plane/test/test_ethernet_config.yml (+346 -0)
  18. control_plane/test/test_ethernet_fact.yml (+157 -0)
  19. control_plane/test/test_ethernet_inventory (+5 -0)
  20. control_plane/test/test_vars/base_vars.yml (+150 -0)
  21. control_plane/test/test_vars/login_vars.yml (+81 -0)
  22. control_plane/test/test_vars/test_control_plane_vars.yml (+94 -0)
  23. control_plane/test/test_vars/test_ethernet_vars.yml (+56 -0)
  24. examples/host_mapping_file_one_touch.csv (+3 -0)
  25. examples/host_mapping_file_os_provisioning.csv (+3 -0)
  26. examples/mapping_device_file.csv (+2 -0)
  27. examples/mapping_file.csv (+0 -2)

+ 3 - 3
.github/workflows/ansible-lint.yml

@@ -39,9 +39,9 @@ jobs:
         # [optional]
         # Arguments to override a package and its version to be set explicitly.
         # Must follow the example syntax.
-        #override-deps: |
-        #  ansible==2.9
-        #  ansible-lint==4.2.0
+        override-deps: |
+          ansible==2.10
+          ansible-lint==5.0.7
         # [optional]
         # Arguments to be passed to the ansible-lint
 

+ 16 - 8
control_plane/input_params/powervault_me4_vars.yml

@@ -22,7 +22,7 @@ locale: "English"
 # Specify the system name to identify the system
 # By default it is set to "Uninitialized_Name"
 # Length should be less than 30 and it should not contain space.
-# This is "optional"
+# Optional
 powervault_me4_system_name: "Unintialized_Name"
 
 # Specify the snmp notification level
@@ -37,6 +37,11 @@ powervault_me4_system_name: "Unintialized_Name"
 # Compulsory
 powervault_me4_snmp_notify_level: "none"
 
+# The type of pool to be created on the powervault
+# It can be either linear or virtual.
+# Default: linear
+powervault_me4_pool_type: "linear"
+
 # Specify the required RAID Level
 # The different RAID levels and the min and max number of disks supported for each RAID are
 # r1/raid1: 2
@@ -53,24 +58,27 @@ powervault_me4_raid_levels: "raid1"
 # the enclosure number and disk range in the Enter Range of Disks text box. 
 # Use the format enclosure-number.disk-range,enclosure-number.disk-range. 
 # For example, to select disks 3-12 in enclosure 1 and 5-23 in enclosure 2, enter 1.3-12,2.5-23.
-# For ME4012 - 0.0-0.11,1.0-1.11 are the allowed values
+# For ME4012 - 0.0-0.11 are the allowed values
+# For RAID 10, the disk range should be given in subgroups separated by colons, with no spaces.
+# RAID 10 example: 1.1-2:1.3-4:1.7,1.10
 # Default value is 0.1-2
 # Compulsory
 powervault_me4_disk_range: "0.1-2"
 
 # Specify the volume names
-# Cannot be left blank
 # the default value is "k8s_volume" and "slurm_volume"
 # Compulsory
 powervault_me4_k8s_volume_name: "k8s_volume"
 powervault_me4_slurm_volume_name: "slurm_volume"
 
 # Specify the disk group name
-# If left blank, system automatically assigns the name
+# Mandatory
 powervault_me4_disk_group_name: "omnia"
 
 # Specify the percentage for partition in disk
 # Default value is "60%"
+# Min: 5
+# Max: 90
 # Compulsory
 powervault_me4_disk_partition_size: "60"
 
@@ -79,12 +87,12 @@ powervault_me4_disk_partition_size: "60"
 # Compulsory
 powervault_me4_volume_size: "100GB"
 
-#Specify the pool for volume
+# Specify the pool for disk and volumes
 # Pool can either be  a/A or b/B.
-# Compulsory
+# Mandatory if powervault_me4_pool_type is set to "virtual".
 powervault_me4_pool: "a"
 
 # Specify the nic of the server with which Powervault is connected.
-# Default value is eno1.
+# Default value is em1.
 # Compulsory
-powervault_me4_server_nic: "eno1"
+powervault_me4_server_nic: "em1"

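As a rough sketch of how the new pool option is meant to be used (illustrative values only; just the defaults shown in the hunk above are confirmed by this commit, and the linear/virtual behaviour follows from the volume.yml and pv_validation.yml hunks below), a filled-in powervault_me4_vars.yml could look like:

# Linear pool (default): powervault_me4_pool is not used; the disk group name acts as the pool
powervault_me4_pool_type: "linear"
powervault_me4_raid_levels: "raid1"
powervault_me4_disk_range: "0.1-2"
powervault_me4_disk_group_name: "omnia"

# Virtual pool (alternative): powervault_me4_pool must then be a/A or b/B
# powervault_me4_pool_type: "virtual"
# powervault_me4_pool: "a"
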
+ 37 - 0
control_plane/roles/control_plane_common/tasks/count_component_roles.yml

@@ -0,0 +1,37 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# limitations under the License.
+---
+
+- name: Count of manager nodes defined
+  set_fact:
+    count_of_manager: "{{ count_of_manager| int + 1 }}"
+  when: item == group_name_manager
+  tags: install
+
+- name: Count of compute nodes defined
+  set_fact:
+    count_of_compute: "{{ count_of_compute| int + 1 }}"
+  when: item == group_name_compute
+  tags: install
+
+- name: Count of login nodes defined
+  set_fact:
+    count_of_login: "{{ count_of_login| int + 1 }}"
+  when: item == group_name_login
+  tags: install
+
+- name: Count of NFS nodes defined
+  set_fact:
+    count_of_nfs_node: "{{ count_of_nfs_node| int + 1 }}"
+  when: item == group_name_nfs
+  tags: install

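For context, these counters are driven from validate_host_mapping_file.yml (added later in this commit), which includes this task file once per role listed in the mapping file:

- name: Find count of each component role defined in mapping file
  include_tasks: count_component_roles.yml
  loop: "{{ list_of_roles }}"
  when: component_role_support
  tags: install
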
+ 9 - 1
control_plane/roles/control_plane_common/tasks/main.yml

@@ -38,8 +38,16 @@
   import_tasks: fetch_sm_inputs.yml
   when: ib_switch_support
 
+- name: Host mapping file validation
+  import_tasks: validate_host_mapping_file.yml
+  when: host_mapping_file_path |length >0
+
+- name: Device mapping file validation
+  import_tasks: validate_device_mapping_file.yml
+  when: mngmnt_mapping_file_path |length >0
+
 - name: Encrypt idrac_tools_vars.yml
   import_tasks: encrypt_idrac_tools_vars.yml
 
 - name: NFS Server setup for offline repo and awx
-  import_tasks: nfs_server_setup.yml
+  import_tasks: nfs_server_setup.yml

+ 82 - 0
control_plane/roles/control_plane_common/tasks/validate_device_mapping_file.yml

@@ -0,0 +1,82 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# limitations under the License.
+---
+- name: Check that device mapping file exists at mentioned path
+  stat:
+    path: "{{ mngmnt_mapping_file_path }}"
+  register: stat_result
+  tags: install
+
+- name: Fail if config file doesn't exist
+  fail:
+    msg: "{{ fail_msg_mapping_file + mngmnt_mapping_file_path }}"
+  when: not stat_result.stat.exists
+  tags: install
+
+- name: Read device mapping file from CSV file and return a dictionary
+  read_csv:
+    path: "{{ mngmnt_mapping_file_path }}"
+    key: "{{ mapping_file_key }}"
+  register: device_mapping_file
+  delegate_to: localhost
+  tags: install
+
+- name: Check if header is present in mapping file
+  shell:  set -o pipefail && awk 'NR==1 { print $1}' "{{ mngmnt_mapping_file_path }}"
+  register: mngmnt_header
+  changed_when: false
+  tags: install
+
+- name: Fail if header not in correct format
+  fail:
+    msg: "{{ fail_device_mapping_file_header }}"
+  when: mngmnt_header.stdout !=  device_mapping_header_format
+  tags: install
+
+- name: Check if mapping file is comma separated
+  shell: awk -F\, '{print NF-1}' "{{ mngmnt_mapping_file_path }}"
+  register: mngmnt_comma_seperated
+  changed_when: false
+  tags: install
+
+- name: Fail if not comma separated or if all fields are not given
+  fail:
+    msg: "{{ fail_mapping_file_field_seperation }}"
+  when: not(item =="1")
+  with_items: "{{ mngmnt_comma_seperated.stdout_lines }}"
+  tags: install
+
+- name: Initialize count variables
+  set_fact:
+    list_of_ips: []
+    count_total_items: "{{ device_mapping_file.dict |length }}"
+  tags: install
+
+- name: Create list of IPs in mapping file
+  set_fact:
+    list_of_ips: "{{ [ item.value.IP ] + list_of_ips }}"
+  loop: "{{ device_mapping_file.dict | dict2items }}"
+  loop_control:
+    label: "{{ item.value.MAC }}"
+  tags: install
+
+- name: Find count of unique IPs
+  set_fact:
+    count_of_unique_ip : "{{ list_of_ips| unique| length }}"
+  tags: install
+
+- name: Validation to check if unique IPs are provided for each node
+  fail:
+    msg: "{{ fail_mapping_file_duplicate_ip + mngmnt_mapping_file_path }}"
+  when: not(count_of_unique_ip|int == count_total_items|int)
+  tags: install

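For illustration, the device mapping file this validation expects is a comma separated file with the MAC,IP header and a unique IP per device; a hypothetical example (the sample shipped with this commit lives in examples/mapping_device_file.csv, whose diff is not shown in this excerpt):

MAC,IP
aa:bb:cc:dd:ee:01,172.17.0.100
aa:bb:cc:dd:ee:02,172.17.0.101
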
+ 179 - 0
control_plane/roles/control_plane_common/tasks/validate_host_mapping_file.yml

@@ -0,0 +1,179 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# limitations under the License.
+---
+- name: Check that host mapping file exists at mentioned path
+  stat:
+    path: "{{ host_mapping_file_path }}"
+  register: stat_result
+  tags: install
+
+- name: Fail if config file doesn't exist
+  fail:
+    msg: "{{ fail_msg_mapping_file + host_mapping_file_path }}"
+  when: not stat_result.stat.exists
+  tags: install
+
+- name: Read host mapping file from CSV file and return a dictionary
+  read_csv:
+    path: "{{ host_mapping_file_path }}"
+    key: "{{ mapping_file_key }}"
+  register: mapping_file
+  delegate_to: localhost
+  tags: install
+
+- name: Initialize variable for role support in mapping file
+  set_fact:
+    component_role_support: false
+  tags: install
+
+- name: Check if header is present in mapping file
+  shell:  set -o pipefail && awk 'NR==1 { print $1}' "{{ host_mapping_file_path }}"
+  register: mngmnt_header
+  changed_when: false
+  tags: install
+
+- name: Fail if header not in correct format
+  assert:
+    that: (mngmnt_header.stdout ==  host_mapping_header_format) or (mngmnt_header.stdout == host_mapping_header_with_role_format)
+    fail_msg: "{{ fail_mapping_file_header }}"
+  tags: install
+
+- name: Check if mapping file is comma separated
+  shell: awk -F\, '{print NF-1}' "{{ host_mapping_file_path }}"
+  register: mngmnt_comma_seperated
+  changed_when: false
+  tags: install
+
+- name: Set variable if component roles given in mapping file
+  set_fact:
+    component_role_support: true
+  when: mngmnt_header.stdout == host_mapping_header_with_role_format
+  tags: install
+
+- name: Fail if not comma separated or if all fields are not given for MAC,Hostname,IP,Component_role
+  fail:
+    msg: "{{ fail_mapping_file_field_seperation }}"
+  when: not(item =="3") and not (item == "-1") and component_role_support
+  with_items: "{{ mngmnt_comma_seperated.stdout_lines }}"
+  tags: install
+
+- name: Fail if not comma separated or if all fields are not given for MAC,Hostname,IP
+  fail:
+    msg: "{{ fail_mapping_file_field_seperation }}"
+  when: not(item =="2") and not (item == "-1") and not(component_role_support)
+  with_items: "{{ mngmnt_comma_seperated.stdout_lines }}"
+  tags: install
+
+- name: Initialize count variables
+  set_fact:
+    list_of_ips: []
+    list_of_roles: []
+    list_of_hostnames: []
+    count_of_manager: 0
+    count_of_compute: 0
+    count_of_nfs_node: 0
+    count_of_login: 0
+    count_total_items: "{{ mapping_file.dict |length }}"
+  tags: install
+
+- name: Create list of IPs and component roles and hostnames defined in mapping file
+  set_fact:
+    list_of_ips: "{{ [ item.value.IP ] + list_of_ips }}"
+    list_of_hostnames: "{{ [ item.value.Hostname ] + list_of_hostnames }}"
+  loop: "{{ mapping_file.dict | dict2items }}"
+  loop_control:
+    label: "{{ item.value.MAC }}"
+  tags: install
+
+- name: Create list of component roles defined in mapping file
+  set_fact:
+    list_of_roles: "{{ [ item.value.Component_role ] + list_of_roles }}"
+  loop: "{{ mapping_file.dict | dict2items }}"
+  loop_control:
+    label: "{{ item.value.MAC }}"
+  when: component_role_support
+  tags: install
+
+- name: Assert hostnames
+  assert:
+    that:
+      - '"_" not in item'
+      - '"." not in item'
+      - '" " not in item'
+    quiet: yes
+    fail_msg: "{{ fail_mapping_file_hostname_chars + item }}"
+  with_items: "{{ list_of_hostnames }}"
+  tags: install
+
+- name: Find count of unique IPs
+  set_fact:
+    count_of_unique_ip : "{{ list_of_ips| unique| length }}"
+  tags: install
+
+- name: Validation to check if unique IPs are provided for each node
+  fail:
+    msg: "{{ fail_mapping_file_duplicate_ip + host_mapping_file_path }}"
+  when: not(count_of_unique_ip|int == count_total_items|int)
+  tags: install
+
+- name: Find count of unique hostnames
+  set_fact:
+    count_of_unique_hostnames : "{{ list_of_hostnames | unique | length }}"
+  tags: install
+
+- name: Validation to check if unique hostnames are provided for each node
+  fail:
+    msg: "{{ fail_mapping_file_duplicate_hostname }}"
+  when: not(count_of_unique_hostnames|int == count_total_items| int)
+  tags: install
+
+- name: Find count of each component role defined in mapping file
+  include_tasks: count_component_roles.yml
+  loop: "{{ list_of_roles }}"
+  when: component_role_support
+  tags: install
+
+- block:
+  - name: Validation to check if component roles for each node is defined
+    fail:
+      msg: "{{ fail_mapping_file_roles_error }}"
+    when: not( count_total_items|int == (count_of_manager|int + count_of_compute|int + count_of_login|int + count_of_nfs_node|int))
+
+  - name: Validation to check number of manager nodes defined
+    fail:
+      msg: "{{ fail_mapping_file_manager_role }}"
+    when: not (count_of_manager | int  == 1)
+
+  - name: Validation to check number of compute nodes defined
+    fail:
+      msg: "{{ fail_mapping_file_compute_role }}"
+    when: count_of_compute|int  < 1
+
+  - name: Validation to check number of login nodes defined
+    fail:
+      msg: "{{ fail_mapping_file_login_role }}"
+    when: not ( count_of_login|int == 1)
+
+  - name: Validation to check number of nfs nodes defined
+    fail:
+      msg: "{{ fail_mapping_file_nfs_role }}"
+    when: powervault_support and not (count_of_nfs_node|int == 1)
+  tags: install
+
+  rescue:
+  - name: Count of roles defined
+    fail:
+      msg: "{{ count_of_roles_defined }}"
+    tags: install
+
+  when: component_role_support

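Similarly, a hypothetical host mapping file that would pass these checks (header format and role names are taken from the vars hunk below; exactly one manager and one login_node, at least one compute, and one nfs_node when PowerVault is enabled):

MAC,Hostname,IP,Component_role
aa:bb:cc:dd:ee:01,node001,172.17.0.101,manager
aa:bb:cc:dd:ee:02,node002,172.17.0.102,compute
aa:bb:cc:dd:ee:03,node003,172.17.0.103,login_node
aa:bb:cc:dd:ee:04,node004,172.17.0.104,nfs_node
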
+ 30 - 0
control_plane/roles/control_plane_common/vars/main.yml

@@ -140,3 +140,33 @@ nfs_services:
   - mountd
   - rpc-bind
   - nfs
+
+# Usage: validate_host_mapping_file.yml
+fail_msg_mapping_file: "Mapping file doesn't exist at given path: "
+mapping_file_key: "MAC"
+fail_mapping_file_header: "Header of csv file is not in correct format.
+                          It should be of the format: MAC,Hostname,IP,Component_role or MAC,Hostname,IP"
+host_mapping_header_format: "MAC,Hostname,IP"
+host_mapping_header_with_role_format: "MAC,Hostname,IP,Component_role"
+fail_mapping_file_field_seperation: "Failed: Mapping file should be comma separated and all fields must be filled."
+fail_mapping_file_duplicate_ip: "Failed: Duplicate ip exists. Please verify following mapping file again: "
+fail_mapping_file_duplicate_hostname: "Failed: Duplicate hostname exists. Please verify host mapping file again."
+fail_mapping_file_hostname_chars: "Hostname should not contain _ or . or space as it will cause error with slurm and K8s. Found in: "
+fail_mapping_file_roles_error: "Failed. Define correct Component Roles for each node.
+                                Component roles can only take values: {{ group_name_manager }}, {{group_name_compute}},
+                                 {{ group_name_login }}, {{ group_name_nfs }}"
+fail_mapping_file_manager_role: "Exactly 1 manager node must be defined"
+fail_mapping_file_compute_role: "At least 1 compute node must be defined"
+fail_mapping_file_login_role: "Exactly 1 login node must be defined"
+fail_mapping_file_nfs_role: "Exactly 1 nfs node must be defined"
+count_of_roles_defined: "Component Roles defined: Manager Node: {{ count_of_manager }},
+                        Compute Nodes: {{ count_of_compute }}, Login Node: {{ count_of_login }},
+                        Nfs Node: {{ count_of_nfs_node }}, Total Nodes: {{ count_total_items }} "
+group_name_manager: "manager"
+group_name_compute: "compute"
+group_name_login: "login_node"
+group_name_nfs: "nfs_node"
+
+# Usage: validate_device_mapping_file.yml
+fail_device_mapping_file_header: "Failed: Header (MAC,IP) should be present in the mapping file."
+device_mapping_header_format: "MAC,IP"

+ 14 - 6
control_plane/roles/powervault_me4/tasks/map_volume.yml

@@ -24,6 +24,7 @@
   changed_when: false
   no_log: true
   register: config_content
+  delegate_to: localhost
   run_once: true
 
 - name: Decrpyt login_vars.yml
@@ -32,6 +33,7 @@
     --vault-password-file {{ login_pv_vault_file }}
   changed_when: false
   run_once: true
+  delegate_to: localhost
   when: "'$ANSIBLE_VAULT;' in config_content.stdout"
 
 - name: Include variable file login_vars.yml
@@ -57,9 +59,18 @@
   delegate_to: localhost
   tags: install
 
+- name: Get map port
+  set_fact:
+    map_port: "{{ item.0 }}"
+  when: hostvars['pv']['map_ip'] == item.1
+  with_together:
+    - "{{ up_port }}"
+    - "{{ set_port_ip }}"
+  register: output
+
 - name: Map volume
   uri:
-    url: https://{{ groups['powervault_me4'][0] }}/api/map/volume/{{ powervault_me4_k8s_volume_name }}/access/{{ access }}/ports/{{ item.0 }}/lun/{{ lun1 }}/initiator/{{ hostvars['server_iqdn_id']['server_iqdn'] }}
+    url: https://{{ groups['powervault_me4'][0] }}/api/map/volume/{{ powervault_me4_k8s_volume_name }}/access/{{ access }}/ports/{{ map_port }}/lun/{{ lun1 }}/initiator/{{ hostvars['server_iqdn_id']['server_iqdn'] }}
     method: GET
     body_format: json
     validate_certs: no
@@ -67,14 +78,12 @@
     headers:
       {'sessionKey': "{{ map_session_key.json.status[0].response }}", 'datatype':'json'}
   register: map_vol1
-  with_together:
-    - "{{ up_port }}"
   delegate_to: localhost
   tags: install
 
 - name: Map volume
   uri:
-    url: https://{{ groups['powervault_me4'][0] }}/api/map/volume/{{ powervault_me4_slurm_volume_name }}/access/{{ access }}/ports/{{ item.0 }}/lun/{{ lun2 }}/initiator/{{ hostvars['server_iqdn_id']['server_iqdn']  }}
+    url: https://{{ groups['powervault_me4'][0] }}/api/map/volume/{{ powervault_me4_slurm_volume_name }}/access/{{ access }}/ports/{{ map_port }}/lun/{{ lun2 }}/initiator/{{ hostvars['server_iqdn_id']['server_iqdn']  }}
     method: GET
     body_format: json
     validate_certs: no
@@ -82,8 +91,6 @@
     headers:
       {'sessionKey': "{{ map_session_key.json.status[0].response }}", 'datatype':'json'}
   register: map_vol2
-  with_together:
-    - "{{ up_port }}"
   delegate_to: localhost
   tags: install
 
@@ -93,4 +100,5 @@
     --vault-password-file {{ login_pv_vault_file }}
   changed_when: false
   run_once: true
+  delegate_to: localhost
   when: "'$ANSIBLE_VAULT;' in config_content.stdout"

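For readers unfamiliar with with_together, the new "Get map port" task zips up_port and set_port_ip element-wise and keeps the port whose IP matches hostvars['pv']['map_ip']; with hypothetical values:

# Hypothetical data, for illustration only
up_port:     ["a1", "a2"]
set_port_ip: ["192.168.25.5", "192.168.25.6"]
# with_together iterates over the pairs (a1, 192.168.25.5) and (a2, 192.168.25.6);
# map_port is set to item.0 for the pair whose item.1 equals hostvars['pv']['map_ip'].
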
+ 5 - 0
control_plane/roles/powervault_me4/tasks/ports.yml

@@ -24,6 +24,7 @@
   changed_when: false
   no_log: true
   register: config_content
+  delegate_to: localhost
   run_once: true
 
 - name: Decrpyt login_vars.yml
@@ -32,6 +33,7 @@
     --vault-password-file {{ login_pv_vault_file }}
   changed_when: false
   run_once: true
+  delegate_to: localhost
   when: "'$ANSIBLE_VAULT;' in config_content.stdout"
 
 - name: Include variable file login_vars.yml
@@ -53,6 +55,7 @@
       {'datatype': 'json'}
     validate_certs: no
   register: port_session_key
+  delegate_to: localhost
   tags: install
 
 - name: Show ports
@@ -65,6 +68,7 @@
     headers:
       {'sessionKey': "{{ port_session_key.json.status[0].response }}", 'datatype':'json'}
   register: show_ports
+  delegate_to: localhost
   tags: install
 
 - name: Up ports
@@ -95,4 +99,5 @@
   with_together: 
     - "{{ set_port_ip }}"
     - "{{ up_port }}"
+  delegate_to: localhost
   tags: install

+ 4 - 4
control_plane/roles/powervault_me4/tasks/pv_me4_prereq.yml

@@ -57,14 +57,14 @@
 
 - name: Get the product id
   set_fact:
-    pv_id: system_info.json.system[0]['product-id']
+    pv_id: "{{ system_info.json.system[0]['product-id'] }}"
 
 - name: Verify the product id and model no. of device
   fail:
     msg: "{{ fail_pv_support }}"
   when:
-    - scsi_product_id in system_info.json.system[0]['scsi-product-id']
-    - pv_id  == "ME4084" or pv_id == "ME4024"  or pv_id == "ME4012"
+    - scsi_product_id not in system_info.json.system[0]['scsi-product-id']
+    - pv_id  != "ME4084" or pv_id != "ME4024"  or pv_id != "ME4012"
 
 - name: Set system name
   uri:
@@ -76,5 +76,5 @@
     headers:
       {'sessionKey': "{{ session_key.json.status[0].response }}", 'datatype':'json'}
   register: system_name
-  when: powervault_me4_system_name != ""
+  when: powervault_me4_system_name
   tags: install

+ 15 - 3
control_plane/roles/powervault_me4/tasks/pv_validation.yml

@@ -33,6 +33,7 @@
 - name: Check if system name has space
   shell: echo {{ powervault_me4_system_name }} | grep '\s' -c
   register: space_count
+  changed_when: false
   ignore_errors: true
   tags: install
 
@@ -46,6 +47,7 @@
 - name: Check if volume name has space
   shell: echo {{ powervault_me4_k8s_volume_name }} | grep '\s' -c
   register: vol_count1
+  changed_when: false
   ignore_errors: true
   tags: install
 
@@ -59,6 +61,7 @@
 - name: Check if volume name has space
   shell: echo {{ powervault_me4_slurm_volume_name }} | grep '\s' -c
   register: vol_count2
+  changed_when: false
   ignore_errors: true
   tags: install
 
@@ -80,6 +83,7 @@
   assert:
     that:
       - disk_count.stdout == "0"
+      - powervault_me4_disk_group_name | length > 1
       - powervault_me4_disk_group_name | length < 30
     msg: "{{ system_name_wrong }}" 
 
@@ -89,7 +93,7 @@
       - powervault_me4_snmp_notify_level | length >1
       - powervault_me4_snmp_notify_level == "crit" or powervault_me4_snmp_notify_level == "error" or powervault_me4_snmp_notify_level == "warn" or powervault_me4_snmp_notify_level == "resolved" or powervault_me4_snmp_notify_level == "info" or powervault_me4_snmp_notify_level == "none"
     fail_msg: "{{ snmp_wrong_value }}"
-    success_msg: "{{ snmp_success }}" 
+    success_msg: "{{ snmp_success }}"
 
 - name: Assert RAID value
   assert:
@@ -113,12 +117,20 @@
     that: 
       - powervault_me4_pool == "a" or powervault_me4_pool == "A" or powervault_me4_pool == "b" or powervault_me4_pool == "B"
     msg: "{{ wrong_pool }}"
+  when: powervault_me4_pool_type == "virtual"
+
+- name: Check pool type
+  assert:
+    that:
+      - powervault_me4_pool_type | length > 1
+      - powervault_me4_pool_type | lower == "virtual" or powervault_me4_pool_type | lower == "linear"
+    msg: "{{ wrong_pool_type }}"
 
 - name: Check parition percentage
   assert:
     that:
       - powervault_me4_disk_partition_size|int
-      - powervault_me4_disk_partition_size|int < 99
+      - powervault_me4_disk_partition_size|int < 90
       - powervault_me4_disk_partition_size|int > 5
     msg: "{{ wrong_partition }}"
 
@@ -133,4 +145,4 @@
 - name: Assert the nic provided
   assert:
     that:
-      - powervault_me4_server_nic | length > 2
+      - powervault_me4_server_nic | length > 2

+ 76 - 7
control_plane/roles/powervault_me4/tasks/volume.yml

@@ -30,22 +30,45 @@
   register: vol_session_key
   tags: install
 
-- name: Add disk group
+- name: Add disk group in virtual pool
   uri:
-    url: https://{{ inventory_hostname }}/api/add/disk-group/type/{{ type }}/disks/{{ powervault_me4_disk_range }}/level/{{ powervault_me4_raid_levels }}/pool/{{ powervault_me4_pool }}/{{ powervault_me4_disk_group_name }}
+    url: https://{{ inventory_hostname }}/api/add/disk-group/type/{{ powervault_me4_pool_type }}/disks/{{ powervault_me4_disk_range }}/level/{{ powervault_me4_raid_levels }}/pool/{{ powervault_me4_pool }}/{{ powervault_me4_disk_group_name }}
     method: GET
     body_format: json
     validate_certs: no
     use_proxy: no
     headers:
       {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
-  register: pv_disk
+  register: pv_disk1
+  when: powervault_me4_pool_type|lower == "virtual"
+  tags: install
+
+- name: Add disk group in linear pool
+  uri:
+    url: https://{{ inventory_hostname }}/api/add/disk-group/type/{{ powervault_me4_pool_type }}/disks/{{ powervault_me4_disk_range }}/level/{{ powervault_me4_raid_levels }}/{{ powervault_me4_disk_group_name }}
+    method: GET
+    body_format: json
+    validate_certs: no
+    use_proxy: no
+    headers:
+      {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
+  register: pv_disk2
+  when: powervault_me4_pool_type|lower == "linear"
   tags: install
 
 - name: Assert if disk group created or not
   fail:
-    msg: "{{ pv_disk.json.status[0].response }}"
-  when:  pv_disk.json.status[0] ['response-type'] == "Error"
+    msg: "{{ pv_disk1.json.status[0].response }}"
+  when:
+    - powervault_me4_pool_type|lower == "virtual"
+    - pv_disk1.json.status[0] ['response-type'] == "Error"
+
+- name: Assert if disk group created or not
+  fail:
+    msg: "{{ pv_disk2.json.status[0].response }}"
+  when:
+    - powervault_me4_pool_type|lower == "linear"
+    - pv_disk2.json.status[0] ['response-type'] == "Error"
 
 - name: Create volume1
   uri:
@@ -57,6 +80,7 @@
     headers:
       {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
   register: pv_vol1
+  when: powervault_me4_pool_type|lower == "virtual"
   tags: install
 
 - name: Create volume2
@@ -69,14 +93,59 @@
     headers:
       {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
   register: pv_vol2
+  when: powervault_me4_pool_type|lower == "virtual"
+  tags: install
+
+- name: Create volume1
+  uri:
+    url: https://{{ inventory_hostname }}/api/create/volume/size/{{ powervault_me4_volume_size }}/pool/{{ powervault_me4_disk_group_name }}/{{ powervault_me4_k8s_volume_name }}
+    method: GET
+    body_format: json
+    validate_certs: no
+    use_proxy: no
+    headers:
+      {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
+  register: pv_vol3
+  when: powervault_me4_pool_type|lower == "linear"
+  tags: install
+
+- name: Create volume2
+  uri:
+    url: https://{{ inventory_hostname }}/api/create/volume/size/{{ powervault_me4_volume_size }}/pool/{{ powervault_me4_disk_group_name }}/{{ powervault_me4_slurm_volume_name }}
+    method: GET
+    body_format: json
+    validate_certs: no
+    use_proxy: no
+    headers:
+      {'sessionKey': "{{ vol_session_key.json.status[0].response }}", 'datatype':'json'}
+  register: pv_vol4
+  when: powervault_me4_pool_type|lower == "linear"
   tags: install
 
 - name: Assert if k8s_volume created correctly
   fail:
     msg: "{{ pv_vol1.json.status[0].response }}"
-  when: pv_vol1.json.status[0]['response-type'] == "Error"
+  when:
+    - powervault_me4_pool_type| lower == "virtual"
+    - pv_vol1.json.status[0]['response-type'] == "Error"
 
 - name: Assert if slurm_volume created correctly
   fail:
     msg: "{{ pv_vol2.json.status[0].response }}"
-  when: pv_vol2.json.status[0]['response-type'] == "Error"
+  when:
+    - powervault_me4_pool_type| lower == "virtual"
+    - pv_vol2.json.status[0]['response-type'] == "Error"
+
+- name: Assert if k8s_volume created correctly
+  fail:
+    msg: "{{ pv_vol3.json.status[0].response }}"
+  when:
+    - powervault_me4_pool_type| lower == "linear"
+    - pv_vol3.json.status[0]['response-type'] == "Error"
+
+- name: Assert if slurm_volume created correctly
+  fail:
+    msg: "{{ pv_vol4.json.status[0].response }}"
+  when:
+    - powervault_me4_pool_type|lower == "linear"
+    - pv_vol4.json.status[0]['response-type'] == "Error"

+ 1 - 2
control_plane/roles/powervault_me4/vars/main.yml

@@ -23,7 +23,7 @@ correct_disk_range: "Succes: Disk range is correct"
 wrong_pool: "Failed: Given pool value is wrong"
 wrong_partition: "Failed: Given partition is wrong"
 wrong_vol_size: "Failed: Given volume size is wrong"
-
+wrong_pool_type: "Failed: Given pool type value is wrong"
 
 # Usage: pv_me4_prereq.yml
 scsi_product_id: ME4
@@ -37,7 +37,6 @@ base_pv_file: "{{ role_path }}/../../input_params/base_vars.yml"
 # Usage: volume.yml
 lun1: 0
 lun2: 1
-type: virtual
 
 # Usage: ports.yml
 port_ip: 192.168.25.

+ 709 - 0
control_plane/test/test_control_plane.yml

@@ -0,0 +1,709 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Testcase OMNIA_1.1_MS_TC_001
+# Test Case to validate the execution of control_plane.yml with valid inputs -- Default Test Case
+- name: OMNIA_1.1_MS_TC_001
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: VERIFY_OMNIA_01
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+             
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+          tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+          tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_002
+# Test Case to validate the execution of control_plane.yml with no input
+- name: OMNIA_1.1_MS_TC_002
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_002
+
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc02 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane_common role
+          include_role:
+            name: ../roles/control_plane_common
+          vars:
+            base_vars_filename: ../input_params/base_vars.yml
+      rescue:
+        - name: Validate error
+          assert:
+            that: input_base_failure_msg in ansible_failed_result.msg
+            success_msg: "{{ input_config_check_success_msg }}"
+            fail_msg: "{{ input_config_check_fail_msg }}"
+      tags: Execute_common_role
+    
+# Testcase OMNIA_1.1_MS_TC_003 and OMNIA_1.1_MS_TC_004
+# Test Case to validate the execution of control_plane.yml with NFS share already present
+- name: OMNIA_1.1_MS_TC_003
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_003,TC_004
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+    
+    - name: Creating new control_plane.yml
+      copy:
+        dest: "../test_control_plane.yml"
+        content: |
+         - name: Executing omnia roles
+           hosts: localhost
+           connection: local
+           roles:
+              - control_plane_common
+              - control_plane_repo
+        mode: '0644'
+      tags: Replace_control_plane
+      
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook test_control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+          tags: Execute_control_plane
+    
+    - block:
+        - name: Execute validation script
+          include_tasks: "{{ control_plane_validation_script_path }}" 
+          tags: Execute_Validation_Script
+      
+    - name: Delete newly created control_plane.yml
+      file:
+        state: absent
+        path: ../test_control_plane.yml
+      when: foo_stat.stat.exists
+      tags: Delete_test_control_plane
+
+# Testcase OMNIA_1.1_MS_TC_005
+# Test Case to validate the execution of control_plane.yml after a successful run and validate k8s pods along with services after reboot.
+- name: OMNIA_1.1_MS_TC_005
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_005
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+      
+    - name: Check uptime
+      command: uptime -p
+      register: system_uptime
+      changed_when: false
+      tags: Check_Uptime
+      
+    - name: Extracting data from system_uptime
+      set_fact:
+        uptime_number: "{{ system_uptime.stdout.split()[1] }}"
+        uptime_min: "{{ system_uptime.stdout.split()[2] }}"
+    
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      when: uptime_number|int > 15
+      tags: Replace_input
+      
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+          when: uptime_number|int > 15
+          tags: Execute_control_plane
+          
+        - name: Reboot system
+          command: reboot
+          when: uptime_number|int > 15
+          tags: Reboot_System
+    
+    - block:
+        - name: Wait for kubectl to get things ready
+          pause:
+            seconds: 200
+          when: (uptime_number| int <= 15) and (uptime_min == "minutes")
+
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+          when: (uptime_number| int <= 15) and (uptime_min == "minutes")
+          tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_006 and OMNIA_1.1_MS_TC_007
+# Test Case to validate the execution of control_plane.yml and after a successful run the user deletes/stops all pods
+- name: OMNIA_1.1_MS_TC_006
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_006,TC_007
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: "0644"
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+      
+    - name: Delete all containers
+      command: kubectl delete pods --all --all-namespaces
+      changed_when: false
+      tags: Delete_Pods
+      
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+      
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_008
+# Test Case to validate the execution of control_plane.yml with infiniband=false, powervault=true and ethernet=true
+- name: OMNIA_1.1_MS_TC_008
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_008
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - name: Editing base_vars.yml for powervault to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ powervault_false }}"
+        replace: "{{ powervault_true }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for ethernet to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ ethernet_false }}"
+        replace: "{{ ethernet_true }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for infiniband to false
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ infiniband_true }}"
+        replace: "{{ infiniband_false }}"
+      tags: Edit_base_vars
+        
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_009
+# Test Case to validate the execution of control_plane.yml with infiniband=true, powervault=false and ethernet=true
+- name: OMNIA_1.1_MS_TC_009
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_009
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - name: Editing base_vars.yml for powervault to false
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ powervault_true }}"
+        replace: "{{ powervault_false }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for ethernet to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ ethernet_false }}"
+        replace: "{{ ethernet_true }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for infiniband to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ infiniband_false }}"
+        replace: "{{ infiniband_true }}"
+      tags: Edit_base_vars
+        
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_010
+# Test Case to validate the execution of control_plane.yml with infiniband=true, powervault=true and ethernet=false
+- name: OMNIA_1.1_MS_TC_010
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_010
+
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - name: Editing base_vars.yml for powervault to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ powervault_false }}"
+        replace: "{{ powervault_true }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for ethernet to false
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ ethernet_true }}"
+        replace: "{{ ethernet_false }}"
+      tags: Edit_base_vars
+    
+    - name: Editing base_vars.yml for infiniband to true
+      replace:
+        path: ../input_params/base_vars.yml
+        regexp: "{{ infiniband_false }}"
+        replace: "{{ infiniband_true }}"
+      tags: Edit_base_vars
+        
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+
+# Testcase OMNIA_1.1_MS_TC_011
+# Test Case to validate the execution of control_plane.yml with firmware update set to False
+- name: OMNIA_1.1_MS_TC_011
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_011
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+      
+    - name: Set firmware update to false
+      replace:
+        path: ../input_params/idrac_vars.yml
+        regexp: "{{ fw_update_true }}"
+        replace: "{{ fw_update_false }}"
+      tags: Set_FW_Update
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+      
+    - name: Check if firmware updates folder exists
+      stat:
+        path: /var/nfs_repo/dellupdates
+      register: fw_update_dir
+      tags: Set_FW_Update
+      
+    - name: Verify firmware updates were downloaded
+      assert:
+        that:
+          - not fw_update_dir.stat.exists
+        success_msg: "{{ fw_success_validation }}"
+        fail_msg: "{{ fw_fail_validation }}"
+      tags: Set_FW_Update
+        
+# Testcase OMNIA_1.1_MS_TC_012
+# Test Case to validate the execution of control_plane.yml with firmware update set to true
+- name: OMNIA_1.1_MS_TC_012
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml   
+   
+  gather_subset:
+    - 'min'
+  tags: TC_012
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+      
+    - name: Set firmware update to true
+      replace:
+        path: ../input_params/idrac_vars.yml
+        regexp: "{{ fw_update_false }}"
+        replace: "{{ fw_update_true }}"
+      tags: Set_FW_Update
+        
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+     
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+      tags: Execute_control_plane
+       
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+      tags: Execute_Validation_Script
+      
+    - name: Check if firmware updates folder exists
+      stat:
+        path: /var/nfs_repo/dellupdates
+      register: fw_update_dir
+      tags: Set_FW_Update
+      
+    - name: Verify firmware updates were downloaded
+      assert:
+        that:
+          - fw_update_dir.stat.exists
+        success_msg: "{{ fw_success_validation }}"
+        fail_msg: "{{ fw_fail_validation }}"
+      tags: Set_FW_Update
+
+# Testcase OMNIA_1.1_MS_TC_013
+# Test Case to validate the execution of control_plane.yml with docker login credential
+- name: OMNIA_1.1_MS_TC_013
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/control_plane_common/vars/main.yml  
+    - test_vars/test_control_plane_vars.yml  
+   
+  gather_subset:
+    - 'min'
+  tags: TC_013
+  
+  tasks:
+    - name: Check OS Version
+      assert:
+        that:
+          - 'ansible_distribution == "{{ os_name }}"'
+          - 'ansible_distribution_version == "{{ os_version }}"'
+        success_msg: "{{ check_os_success_msg }}"
+        fail_msg: "{{ check_os_fail_msg }}"
+      tags: Check_OS
+    
+    - name: Replace input parameters folder
+      copy:
+        src: "{{ item }}"
+        dest: "{{ input_params_folder }}"
+        force: yes
+        mode: '0644'
+      with_items:
+        - "{{ input_files_tc01 }}"
+      tags: Replace_input
+      
+    - name: Change docker params in omnia_config.yml
+      replace:
+        path: ../../omnia_config.yml
+        regexp: "docker_username: .*$"
+        replace: 'docker_username: "{{ docker_user }}"'
+      tags: Set_Docker_Creds
+    
+    - name: Assert if the credentials are valid in test_control_plane_vars.yml
+      assert:
+        that:
+          - 'docker_user != "User"'
+          - 'docker_password != "Password"'
+        success_msg: "{{ valid_docker_creds }}"
+        fail_msg: "{{ invalid_docker_creds }}"
+      tags: Set_Docker_Creds
+    
+    - name: Change docker params in omnia_config.yml
+      replace:
+        path: ../../omnia_config.yml
+        regexp: "docker_password: .*$"
+        replace: 'docker_password: "{{ docker_password }}"'
+      tags: Set_Docker_Creds
+    
+    - block:    
+        - name: Execute control_plane.yml playbook
+          command: ansible-playbook control_plane.yml
+          args:
+            chdir: "{{ control_plane_dir }}"
+          tags: Execute_control_plane
+    
+    - block:
+        - name: Execute default validation script
+          include_tasks: "{{ control_plane_validation_script_path }}"
+          tags: Execute_Validation_Script
+      
+    - name: Fetch docker info
+      shell: docker login & sleep 3
+      register: new
+      changed_when: false
+      tags: Set_Docker_Creds
+
+    - name: Assert that docker was used to pull images 
+      assert:
+        that:
+          - "'Login did not succeed' in new.stderr"
+        success_msg: "{{ docker_success_validation }}"
+        fail_msg: "{{ docker_fail_validation }}"
+      tags: Set_Docker_Creds

+ 271 - 0
control_plane/test/test_control_plane_validation.yml

@@ -0,0 +1,271 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---  
+
+- block:
+
+    - name: Fetch Package info
+      package_facts:
+        manager: auto
+      
+    - name: Verify all packages are installed
+      assert:
+        that: "'{{ item }}' in ansible_facts.packages"
+        success_msg: "{{ install_package_success_msg }}"
+        fail_msg: "{{ install_package_fail_msg }}"
+      when: "'python-docker' not in item"
+      with_items: "{{ common_packages }}"
+      ignore_errors: true
+      
+    - name: Check login_vars is encrypted
+      command: cat {{ login_vars_filename }}
+      changed_when: false
+      register: config_content
+       
+    - name: Validate whether login file is encrypted
+      assert:
+        that: "'$ANSIBLE_VAULT;' in config_content.stdout"
+        fail_msg: "{{ login_vars_fail_msg }}"
+        success_msg: "{{ login_vars_success_msg }}"
+            
+#  Installing a required package : JQ      
+    - name: Installing jq (JSON Query)
+      package:
+        name: "{{ test_package }}"
+        state: present
+           
+#  Checking if all the required pods are working
+    - name: Get pods info
+      shell: kubectl get pods --all-namespaces
+      register: all_pods_info
+          
+    - name: Check the count of pods
+      set_fact:
+         count: "{{ all_pods_info.stdout_lines|length - 1 }}"
+          
+    - name: Check if all the pods are running
+      assert:
+        that:
+          - "'Running' in all_pods_info.stdout_lines[{{ item }}]"
+        fail_msg: "{{ check_pods_fail_msg }}"
+        success_msg: "{{ check_pods_success_msg }}"
+      with_sequence: start=1 end={{ count }}
+      
+#  Checking if NFS Server is running and Custom ISO is created
+    - name: Get NFS Stat
+      shell: systemctl status nfs-idmapd
+      register: nfstat_info
+       
+    - name: Verify NFS Stat is running
+      assert:
+        that:
+          - "'Active: active (running)' in nfstat_info.stdout"
+        success_msg: "{{ nfs_share_success_msg }}"
+        fail_msg: "{{ nfs_share_fail_msg }}"
+        
+    - name: Check nfs mount point
+      stat:
+        path: "{{ nfs_mount_Path }}"
+      register: nfs_mount_info
+          
+    - name: Verify nfs share is mounted
+      assert:
+        that:
+          - "{{ nfs_mount_info.stat.exists }}"
+        success_msg: "{{ nfs_mount_success_msg }}"
+        fail_msg: "{{ nfs_mount_fail_msg }}"
+           
+    - name: Check Custom ISO
+      stat:
+        path: "{{ check_iso_path }}"
+      register: check_iso_info
+          
+    - name: Verify Custom ISO is created in the NFS repo
+      assert:
+        that:
+          - "{{ check_iso_info.stat.exists }}"
+        success_msg: "{{ check_iso_success_msg }}"
+        fail_msg: "{{ check_iso_fail_msg }}"
+      
+#  Checking if network-config container is running
+    
+    - name: Get Pod info for network-config
+      shell: |
+         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "network-config" and .labels."io.kubernetes.container.name" == "mngmnt-network-container") | "\(.id) \(.metadata.name) \(.state)"'
+      register: network_config_pod_info
+          
+    - name: Get Pod Status for network-config
+      assert:
+        that:
+          - network_config_pod_info.stdout_lines | regex_search( "{{ container_info }}")
+        success_msg: "{{ network_config_pod_success_msg }}"
+        fail_msg: "{{ network_config_pod_fail_msg }}"
+         
+    - name: Get Pod facts
+      shell: |
+            crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "network-config" and .labels."io.kubernetes.container.name" == "mngmnt-network-container") | "\(.id)"'
+      register: network_config_pod_fact
+         
+    - name: Parse container id for the pods
+      set_fact: 
+        container_id: "{{ network_config_pod_fact.stdout[1:-1] }}"   
+          
+    - name: Check dhcpd and xinetd services are running
+      command: crictl exec {{ container_id }} systemctl is-active {{ item }}
+      changed_when: false
+      ignore_errors: yes
+      register: pod_service_check
+      with_items:
+        - dhcpd
+        - xinetd
+            
+    - name: Verify dhcpd and xinetd services are running
+      assert:
+        that:
+          - "'active' in pod_service_check.results[{{ item }}].stdout"
+          - "'inactive' not in pod_service_check.results[{{ item }}].stdout"
+          - "'unknown' not in pod_service_check.results[{{ item }}].stdout"
+        fail_msg: "{{ pod_service_check_fail_msg }}"
+        success_msg: "{{ pod_service_check_success_msg }}"
+      with_sequence: start=0 end={{ pod_service_check.results|length - 1 }}
+         
+# Checking if cobbler-container is running
+    - name: Get Pod info for cobbler
+      shell: |
+         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "cobbler") | "\(.id) \(.metadata.name) \(.state)"'
+      register: network_config_pod_info
+      
+    - name: Get Pod Status for cobbler
+      assert:
+        that:
+          - network_config_pod_info.stdout_lines | regex_search( "{{ container_info }}")
+        success_msg: "{{ cobbler_pod_success_msg }}"
+        fail_msg: "{{ cobbler_pod_fail_msg }}"
+      
+    - name: Get Pod facts for cobbler
+      shell: |
+            crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "cobbler") | "\(.id)"'
+      register: network_config_pod_fact
+      
+    - name: Extract cobbler pod id
+      set_fact: 
+        cobbler_id: "{{ network_config_pod_fact.stdout[1:-1] }}"   
+      
+    - name: Check tftp, dhcpd, xinetd and cobblerd services are running
+      command: crictl exec {{ cobbler_id }} systemctl is-active {{ item }}
+      changed_when: false
+      ignore_errors: yes
+      register: pod_service_check
+      with_items:
+        - dhcpd
+        - tftp
+        - xinetd
+        - cobblerd
+        
+    - name: Verify tftp, dhcpd, xinetd and cobblerd services are running
+      assert:
+        that:
+          - "'active' in pod_service_check.results[{{  item  }}].stdout"
+          - "'inactive' not in pod_service_check.results[{{  item  }}].stdout"
+          - "'unknown' not in pod_service_check.results[{{  item  }}].stdout"
+        fail_msg: "{{pod_service_check_fail_msg}}"
+        success_msg: "{{pod_service_check_success_msg}}"
+      with_sequence: start=0 end=3
+
+# Checking Cron-Jobs
+    - name: Check crontab list
+      command: crictl exec {{ cobbler_id }} crontab -l
+      changed_when: false
+      register: crontab_list
+
+    - name: Verify crontab list
+      assert:
+        that:
+          - "'* * * * * /usr/bin/ansible-playbook /root/tftp.yml' in crontab_list.stdout"
+          - "'*/5 * * * * /usr/bin/ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"
+        fail_msg: "{{ cron_jobs_fail_msg }}"
+        success_msg: "{{ cron_jobs_success_msg }}"
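+    # The asserted entries are expected to be installed during cobbler container provisioning:
+    # tftp.yml runs every minute (* * * * *) and inventory_creation.yml every five minutes (*/5 * * * *).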
+
+#  Checking subnet-manager pod is running and opensm is running
+#  Comment if infiniband is not connected
+    - name: Fetch subnet-manager stats
+      shell: kubectl get pods -n subnet-manager 
+      register: sm_manager_info
+
+    - name: Verify subnet_manager container is running
+      assert:
+        that:
+          - "'Running' in sm_manager_info.stdout_lines[1]"
+        fail_msg: "{{ subnet_manager_fail_msg }}"
+        success_msg: "{{ subnet_manager_success_msg }}"
+
+# Checking awx pod is running
+
+    - name: Get Pod info for awx
+      shell: |
+         crictl ps -o json | jq '.containers[] | select(.labels."io.kubernetes.pod.namespace" == "awx") | "\(.id) \(.metadata.name) \(.state)"'
+      register: awx_config_pod_info
+           
+    - name: Get Pod Status for awx
+      assert:
+        that:
+          - awx_config_pod_info.stdout_lines[{{ item }}] | regex_search( "{{ container_info }}")
+        success_msg: "{{ awx_pod_success_msg }}"
+        fail_msg: "{{ awx_pod_fail_msg }}"
+      ignore_errors: yes
+      with_sequence: start=0 end={{ awx_config_pod_info.stdout_lines |length - 1 }}
+          
+    - name: Get pvc stats
+      shell: |
+          kubectl get pvc -n awx -o json |jq '.items[] | "\(.status.phase)"'
+      register: pvc_stats_info
+            
+    - name: Verify if pvc stats is running
+      assert:
+        that:
+          - "'Bound' in pvc_stats_info.stdout"
+        fail_msg: "{{ pvc_stat_fail_msg }}"
+        success_msg: "{{ pvc_stat_success_msg }}"
+      with_sequence: start=0 end={{ pvc_stats_info.stdout_lines |length|int - 1 }}
+            
+    - name: Get svc stats
+      shell: kubectl get svc -n awx awx-service -o json
+      register: svc_stats_info
+           
+    - name: Verify if svc is up and running
+      assert:
+        that:
+          - "'Error from server (NotFound):' not in svc_stats_info.stdout"
+        success_msg: "{{ svc_stat_success_msg }}"
+        fail_msg: "{{ svc_stat_fail_msg }}"
+             
+    - name: Fetch Cluster IP from svc
+      shell: |
+          kubectl get svc -n awx -o json | jq '.items[] | select(.metadata.name == "awx-service") | "\(.spec.clusterIP)"'
+      register: cluster_ip_info
+           
+    - name: Check if connection to svc Cluster IP is enabled
+      uri:
+        url: http://{{ cluster_ip_info.stdout[1:-1] }}
+        follow_redirects: none
+        method: GET
+      ignore_errors: yes
+      register: cluster_ip_conn
+           
+    - name: Verify connection to svc cluster is working
+      assert:
+        that:
+          - cluster_ip_conn.status == 200
+        success_msg: "{{ svc_conn_success_msg }} : {{ cluster_ip_info.stdout[1:-1] }}"
+        fail_msg: "{{ svc_conn_fail_msg }} : {{ cluster_ip_info.stdout[1:-1] }}"

+ 47 - 0
control_plane/test/test_eth_mtu.yml

@@ -0,0 +1,47 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+- name: Get running config and reload PS
+  hosts: ethernet
+  connection: network_cli
+  gather_facts: no
+  collections:
+   - dellemc.os10
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+  tasks:
+   - name: Set facts
+     set_fact:
+       ansible_ssh_user: "{{ username }}"
+       ansible_ssh_pass: "{{ password }}"
+     tags: mtu,reload
+
+   - name: View running configurations
+     dellos10_command:
+       commands: show interface ethernet {{ validation_port }}
+     register: var1
+     tags: mtu
+
+   - name: Print config
+     debug:
+       msg: "{{ var1 }}"
+     tags: mtu
+
+   - name: Reload switch
+     dellos10_command:
+       commands: 
+          - command: 'reload'
+            prompt: '\[confirm yes/no\]:?$'
+            answer: 'yes'
+     tags: reload

+ 346 - 0
control_plane/test/test_ethernet_config.yml

@@ -0,0 +1,346 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+---
+# Testcase OMNIA_1.1_EF_TC_007
+# Execute ethernet.yml with both valid Global and interface configs in ethernet_config.yml
+- name: OMNIA_1.1_EF_TC_007
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_007
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Take backup of ethernet_config.yml
+      copy:
+        src: "{{ ethernet_config_dir }}"
+        dest: "{{ ethernet_config_backup_dir }}"
+        mode: "{{ file_perm }}"
+      tags: TC_007
+
+    - name: Executing ethernet role with default ethernet_config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"      
+
+    - block:
+       - name: Validate default flow
+         assert:
+           that:
+             - ethernet_success_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+    - name: Set MTU of port {{ port_num }}
+      lineinfile:
+       dest: "{{ ethernet_config_dir }}"
+       insertbefore: "{{ search_line }}"
+       line: "{{ add_mtu_line }}"
+    
+    - name: Executing ethernet role after adding MTU to ethernet_config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      
+    - name: Getting MTU of ethernet {{ validation_port }}
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'mtu'
+      changed_when: false
+      register: mtu_out
+      tags: TC_007,TC_002
+       
+    - name: Validate role exec output pre and post MTU addition
+      assert:
+        that:          
+          - validate_mtu_line in mtu_out.stdout
+        success_msg: "{{ success_message }}"
+        fail_msg: "{{ fail_case }}"
+      changed_when: false
+      failed_when: false
+      tags: TC_007
+
+# Testcase OMNIA_1.1_EF_TC_005
+# Execute ethernet.yml with save_config set to False
+- name: OMNIA_1.1_EF_TC_005
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_005
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Reload switch
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'reload'
+      changed_when: false
+    
+    - name: Pausing for switch to come up
+      pause:
+        minutes: "{{ time_to_pause }}"
+        
+    - name: Getting MTU of ethernet {{ validation_port }}
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'mtu'
+      changed_when: false
+      register: mtu_out
+      
+    - block:
+       - name: Validate that MTU is changed
+         assert:
+           that:
+             - validate_mtu_line not in mtu_out.stdout
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+         failed_when: false
+                
+# Testcase OMNIA_1.1_EF_TC_006
+# Execute ethernet.yml with save_config set to True
+- name: OMNIA_1.1_EF_TC_006
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_006
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Set save_changes_to_startup to True in ethernet_vars
+      ansible.builtin.replace:
+        dest: "{{ ethernet_config_dir }}"
+        regexp: 'save_changes_to_startup: false'
+        replace: 'save_changes_to_startup: True'
+        
+    - name: Execute network_ethernet role as port 4 has mtu set in ethernet_vars
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      
+    - name: Reload switch
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'reload'
+      changed_when: false
+    
+    - name: Pausing for switch to come up
+      pause:
+        minutes: "{{ time_to_pause }}"
+        
+    - name: Getting MTU of ethernet {{ validation_port }}
+      command: ansible-playbook -i "{{ inventory_dir }}" "{{ get_mtu_dir }}" --tags 'mtu'
+      changed_when: false
+      register: mtu_out
+    
+    - block:
+       - name: Validate that MTU is changed
+         assert:
+           that:
+             - validate_mtu_line in mtu_out.stdout
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+# Testcase OMNIA_1.1_EF_TC_010
+# Execute ethernet.yml with invalid Global and correct interface configs in ethernet_config.yml
+- name: OMNIA_1.1_EF_TC_010
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_010
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Making global config invalid
+      lineinfile:
+        path: "{{ ethernet_config_dir }}"
+        insertafter: 'os10_config:'
+        line: 'gibberish inserted'
+      tags: TC_010
+
+    - name: Executing ethernet role with invalid global config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+
+    - block:
+       - name: Validate role exec output
+         assert:
+           that:
+             - ethernet_fail_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+
+# Testcase OMNIA_1.1_EF_TC_009
+# Validation of ethernet default configuration
+- name: OMNIA_1.1_EF_TC_009
+  hosts: ethernet
+  gather_facts: false
+  tags: VERIFY_OMNIA_01
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml    
+  tasks:
+    - name: Executing ethernet role
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}" 
+    
+    - block:
+       - name: Validate default flow
+         assert:
+           that:
+             - ethernet_success_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+         
+# Testcase OMNIA_1.1_EF_TC_011
+# Execute ethernet.yml with valid Global and incorrect interface configs in ethernet_config.yml
+- name: OMNIA_1.1_EF_TC_011
+  hosts: ethernet
+  gather_facts: false
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Making interface config invalid
+      lineinfile:
+        path: "{{ ethernet_config_dir }}"
+        insertafter: 'os10_interface:'
+        line: 'gibberish inserted'
+        
+    - name: Executing ethernet role with invalid interface config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+
+    - block:
+       - name: Validate role exec output
+         assert:
+           that:
+             - ethernet_fail_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"      
+
+
+# Testcase OMNIA_1.1_EF_TC_008
+# Execute ethernet.yml with only Global and no interface configs in ethernet_config.yml 
+- name: OMNIA_1.1_EF_TC_008
+  hosts: ethernet
+  gather_facts: false
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Retrieving ethernet_config backup
+      copy:
+        src: "{{ ethernet_config_backup_dir }}"
+        dest: "{{ ethernet_config_dir }}"
+        mode: "{{ file_perm }}"
+      tags: TC_008
+    
+    - name: Removing interface config from ethernet_config
+      ansible.builtin.command: sed -i '22,117d' "{{ ethernet_config_dir }}"
+      args:
+       warn: no
+      changed_when: false
+      tags: TC_008
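+      # Assumption: lines 22-117 of the default ethernet_vars.yml hold the interface
+      # (os10_interface) section; adjust the sed range if the file layout changes.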
+      
+    - name: Executing ethernet role with no interface config
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      
+    - block:
+       - name: Validate default flow
+         assert:
+           that:
+             - ethernet_success_msg in job_status.status
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+    - name: Restoring original ethernet_config
+      copy:
+        src: "{{ ethernet_config_backup_dir }}"
+        dest: "{{ ethernet_config_dir }}"
+        mode: "{{ file_perm }}"
+      tags: TC_008
+      
+    - name: Set save_changes_to_startup to True in ethernet_vars
+      ansible.builtin.replace:
+        dest: "{{ ethernet_config_dir }}"
+        regexp: 'save_changes_to_startup: false'
+        replace: 'save_changes_to_startup: True'
+        
+    - name: Execute network_ethernet role as port 4 has mtu set in ethernet_vars
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ eth_template_value }}"
+       job_template_name: "{{ eth_job_name }}"
+       playbook_path: "{{ eth_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      
+    - name: Set save_changes_to_startup to False in ethernet_vars
+      ansible.builtin.replace:
+        dest: "{{ ethernet_config_dir }}"
+        regexp: 'save_changes_to_startup: True'
+        replace: 'save_changes_to_startup: False'

+ 157 - 0
control_plane/test/test_ethernet_fact.yml

@@ -0,0 +1,157 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+---
+# Testcase OMNIA_1.1_EF_TC_002
+# Execute ethernetfacts.yml with valid IP and valid credentials in ethernet inventory group
+- name: OMNIA_1.1_EF_TC_002
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_002
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Execute ethernet_facts with valid creds and valid IP
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ fact_template_value }}"
+       job_template_name: "{{ fact_job_name }}"
+       playbook_path: "{{ eth_facts_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+      tags: TC_002
+   
+    - block:
+       - name: Validate default flow with valid IP and valid credentials
+         assert:
+           that:
+             - "'successful' in job_status.status"
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+# Testcase OMNIA_1.1_EF_TC_003
+# Execute ethernetfacts.yml with Invalid IP in ethernet inventory group
+- name: OMNIA_1.1_EF_TC_003
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_003
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Setting a random invalid IP as the host
+      set_fact:
+        eth_host_name: "{{ random_ip }}"
+         
+    - name: Execute ethernet_facts with random IP
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ fact_template_value }}"
+       job_template_name: "{{ fact_job_name }}"
+       playbook_path: "{{ eth_facts_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+
+    - block:
+        - name: Validate invalid IP and valid credentials
+          assert:
+            that:
+              - "'failed' in job_status.status"
+            success_msg: "{{ success_message }}"
+            fail_msg: "{{ fail_case }}"
+          changed_when: false
+
+# Testcase OMNIA_1.1_EF_TC_001
+# Execute ethernetfacts.yml with no hosts in ethernet inventory group
+- name: OMNIA_1.1_EF_TC_001
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_001
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Execute ethernet_facts with no host details
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"       
+       template_name: "{{ fact_template_value }}"
+       job_template_name: "{{ fact_job_name }}"
+       playbook_path: "{{ eth_facts_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+    
+    - block:
+       - name: Validate no hosts and valid credentials
+         assert:
+           that:
+             - "'successful' in job_status.status"
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+
+# Testcase OMNIA_1.1_EF_TC_004
+# Execute ethernetfacts.yml with valid IP in ethernet inventory group with incorrect credentials
+- name: OMNIA_1.1_EF_TC_004
+  hosts: ethernet
+  gather_facts: false
+  tags: TC_004
+  connection: local
+  vars_files:
+    - test_vars/test_ethernet_vars.yml
+    - ../roles/webui_awx/vars/main.yml    
+    - ../input_params/base_vars.yml
+  tasks:
+    - name: Making ethernet_credentials invalid
+      tower_credential:
+        name: "ethernet_credential"
+        credential_type: "Machine"
+        inputs:
+          username: "{{ invalid_username }}"
+       
+    - name: Execute ethernet_facts with invalid credentials
+      vars:
+       inventory_name: "{{ eth_inventory_name }}"
+       host_name: "{{ eth_host_name }}"
+       template_name: "{{ fact_template_value }}"
+       job_template_name: "{{ fact_job_name }}"
+       playbook_path: "{{ eth_facts_playbook_path }}"
+       delete_status: true
+      include_tasks: "{{ awx_script_path }}"
+     
+    - block:
+       - name: Validate valid IP and invalid credentials
+         assert:
+           that:
+             - "'failed' in job_status.status"
+           success_msg: "{{ success_message }}"
+           fail_msg: "{{ fail_case }}"
+         changed_when: false
+         
+    - name: Set credentials back to default
+      tower_credential:
+        name: "ethernet_credential"
+        credential_type: "Machine"
+        inputs:
+          username: "{{ username }}"
+          password: "{{ password }}"

+ 5 - 0
control_plane/test/test_ethernet_inventory

@@ -0,0 +1,5 @@
+[ethernet]
+1.2.3.4
+
+[ethernet:vars]
+ansible_network_os=dellemc.os10.os10

+ 150 - 0
control_plane/test/test_vars/base_vars.yml

@@ -0,0 +1,150 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Path to directory hosting ansible config file (ansible.cfg file)
+# Default value is /etc/ansible
+# This directory is on the host running ansible, if ansible is installed using dnf
+# If ansible is installed using pip, this path should be set
+ansible_conf_file_path: /etc/ansible
+
+# This variable is used to enable ethernet switch configuration
+# It accepts boolean values "true" or "false". 
+# By default its value is "false".
+# If ethernet switch support is needed set this to "true"
+ethernet_switch_support: true
+
+# This variable is used to enable infiniband switch configuration
+# It accepts boolean values "true" or "false". 
+# By default its value is "false".
+# If infiniband configuration is needed set this to "true"
+ib_switch_support: true
+
+# This variable is used to enable powervault configuration
+# It accepts boolean values "true" or "false". 
+# By default its value is "false".
+# If powervault configuration is needed set this to "true"
+powervault_support: false
+
+# The nic/ethernet card that will be connected to the public internet.
+# Default value of nic is eno2
+public_nic: "eno2"
+
+# Kubernetes pod network CIDR for appliance k8s network
+# Make sure this value does not overlap with any of the host networks.
+# Default value is "192.168.0.0/16"
+appliance_k8s_pod_net_cidr: "192.168.0.0/16"
+
+### Usage: provision_idrac, network_ib, network_ethernet, powervault_me4 ###
+
+# The trap destination IP address is the IP address of the SNMP Server where the trap will be sent
+# If this variable is left blank, it means SNMP will be disabled
+# Provide a valid SNMP server IP
+snmp_trap_destination: ""
+
+# Provide the snmp community name needed
+# By default this is set to "public"
+snmp_community_name: "public"
+
+### Usage: webui_awx ###
+
+# Organization name that is created in AWX.
+# The default value is “DellEMC”
+awx_organization: "DellEMC"
+
+### Usage: provision_cobbler, provision_idrac ###
+
+# This variable is used to set node provisioning method
+# It accepts values: idrac, pxe
+# Default value is "idrac"
+# If provisioning needs to be done through cobbler, set it to "pxe"
+# If idrac license is not present, provisioning mode will be set to "pxe"
+provision_method: "idrac"
+
+# This is the timezone that will be set during provisioning of OS
+# Available timezones are provided in control_plane/common/files/timezone.txt
+# Default timezone will be "GMT"
+# Some of the other available timezones are EST,CET,MST,CST6CDT,PST8PDT
+timezone: "GMT"
+
+# This is the language that will be set during provisioning of the OS
+# Default language supported is "en-US"
+language: "en-US"
+
+# This is the path where the user has to place the iso image that needs to be provisioned in target nodes.
+# The ISO file should be the CentOS-7-2009-minimal edition.
+# Other iso files are not supported.
+# Mandatory value required
+iso_file_path: "/root/CentOS-7-x86_64-Minimal-2009.iso"
+
+# Default lease time that will be used by dhcp
+# Its unit is seconds
+# Min: 21600 seconds
+# Default: 86400 seconds
+# Max: 31536000 seconds
+# Mandatory value required
+default_lease_time: "86400"
+
+### Usage: control_plane_device ###
+
+# The nic/ethernet card that needs to be connected to provision 
+# the fabric, idrac and powervault.
+# This nic will be configured by Omnia for the DHCP server.
+# Default value of nic is eno1
+mngmnt_network_nic: "eno1"
+
+# The dhcp range for assigning the IPv4 address
+# Example: 172.17.0.1
+# Mandatory value required
+mngmnt_network_dhcp_start_range: "172.19.0.101"
+mngmnt_network_dhcp_end_range: "172.19.0.200"
+
+# The mapping file consists of the MAC address and its respective IP address.
+# The format of the mapping file should be MAC,IP and it must be a CSV file.
+# Eg: xx:yy:zz:aa:bb,172.17.0.5
+# A template for the mapping file exists in omnia/examples and is named mapping_device_file.csv.
+# This depicts the path where user has kept the mapping file for DHCP configurations.
+mngmnt_mapping_file_path: ""
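+# Illustrative example (path is hypothetical):
+# mngmnt_mapping_file_path: "/root/omnia/examples/mapping_device_file.csv"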
+
+### Usage: provision_cobbler ###
+
+# The nic/ethernet card that needs to be connected to provision the OS of bare metal servers
+# This nic will be configured by Omnia for the DHCP server.
+# Default value of nic is eno3
+host_network_nic: "eno3"
+
+# The dhcp range for assigning the IPv4 address
+# Example: 172.17.0.1
+# Mandatory value required
+host_network_dhcp_start_range: "172.17.0.101"
+host_network_dhcp_end_range: "172.17.0.200"
+
+# The mapping file consists of the MAC address and its respective IP address, Hostname and, optionally, Component_role.
+# The format of the mapping file should be MAC,Hostname,IP and it must be a CSV file.
+# Eg: xx:yy:zz:aa:bb,server,172.17.0.5,Group(if any)
+# Templates for the mapping file exist in omnia/examples and are named host_mapping_file_os_provisioning.csv and host_mapping_file_one_touch.csv.
+# This depicts the path where user has kept the mapping file for DHCP configurations.
+host_mapping_file_path: ""
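+# Illustrative example (path is hypothetical):
+# host_mapping_file_path: "/root/omnia/examples/host_mapping_file_os_provisioning.csv"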
+
+### Usage: control_plane_ib ###
+
+# The nic/ethernet card that needs to be connected to configure infiniband switch
+# This nic will be configured by Omnia for the DHCP server.
+# Default value of nic is ib0
+ib_network_nic: "ib0"
+
+# The dhcp range for assigning the IPv4 address
+# Example: 172.17.0.1
+ib_network_dhcp_start_range: "172.25.0.101"
+ib_network_dhcp_end_range: "172.25.0.200"

+ 81 - 0
control_plane/test/test_vars/login_vars.yml

@@ -0,0 +1,81 @@
+# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+### Usage: provision_cobbler, provison_idrac ###
+
+# Password used while deploying OS on bare metal servers.
+# The length of the password should be at least 8 characters.
+# The password must not contain -,\, ',"
+# Mandatory value required
+provision_password: "test@123"
+
+### Usage: provision_cobbler ###
+
+# Password used for cobbler
+# The length of the password should be at least 8 characters.
+# The password must not contain -,\, ',"
+# Mandatory value required
+cobbler_password: "test@123"
+
+### Usage: provision_idrac ###
+
+# The username for idrac
+# The username must not contain -,\, ',"
+# Mandatory value required
+idrac_username: "root"
+
+# Password used for idrac
+# The password must not contain -,\, ',"
+# Mandatory value required
+idrac_password: "calvin"
+
+### Usage: webui_awx ###
+
+# Password used for awx UI
+# The length of the password should be at least 8 characters.
+# The password must not contain -,\, ',"
+#awx_password: ""
+
+### Usage: network_ethernet ###
+
+# The username for ethernet switch
+# The username must not contain -,\, ',"
+ethernet_switch_username: "admin"
+
+# Password used for ethernet switch
+# The password must not contain -,\, ',"
+ethernet_switch_password: "admin"
+
+### Usage: network_ib ###
+
+# The username for infiniband switch
+# The username must not contain -,\, ',"
+ib_username: "admin"
+
+# Password used for infiniband switch
+# The password must not contain -,\, ',"
+ib_password: "admin"
+
+### Usage: powervault_me4 ###
+
+# The username for powervault_me4
+# The username must not contain -,\, ',"
+powervault_me4_username: "manage"
+
+# Password used for powervault_me4
+# The password should have at least one uppercase character, one lowercase character,
+# one numeric character and one non-alphanumeric character.
+# The password must not contain -,\, ',", . , < , comma(,)
+powervault_me4_password: "Test@123"

+ 94 - 0
control_plane/test/test_vars/test_control_plane_vars.yml

@@ -0,0 +1,94 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+#usage: test_control_plane_validation.yml
+
+port_no: 22
+os_name: CentOS
+os_version: '8.4'
+internet_status: "Failed. No Internet connection. Make sure network is up."
+check_os_success_msg: "OS and Version are supported"
+check_os_fail_msg: "Unsupported OS or OS version. OS should be {{ os_name }} and version should be {{ os_version }} or later"
+
+input_params_folder: "../input_params/"
+control_plane_dir: "../"
+control_plane_validation_script_path: test_control_plane_validation.yml
+
+input_files_tc01:
+  - "test_vars/base_vars.yml"
+  - "test_vars/login_vars.yml"
+
+input_files_tc02:
+  - "test_vars/login_vars.yml"
+
+input_config_check_success_msg: "control_plane.yml validation passed"
+input_config_check_fail_msg: "control_plane.yml validation failed"
+
+install_package_success_msg: "{{item}} is installed"
+install_package_fail_msg: "{{item}} is not installed"
+login_vars_filename: "../input_params/login_vars.yml"
+login_vars_fail_msg: "Login vars is not encrypted"
+login_vars_success_msg: "Login vars is encrypted"
+
+fw_update_false: "firmware_update_required: false"
+fw_update_true: "firmware_update_required: true"
+fw_success_validation: "Validation Success for firmware update"
+fw_fail_validation: "Validation Failed for firmware update"
+docker_success_validation: "Docker Validated successfully"
+docker_fail_validation: "Docker not validated"
+
+test_package: 'jq'
+check_pods_success_msg: "Pod is running"
+check_pods_fail_msg: "Pod is not running"
+nfs_share_success_msg: "NFS Server is running"
+nfs_share_fail_msg: "NFS Server is not running"
+
+nfs_mount_Path: "/var/nfs_repo"
+nfs_mount_success_msg: "NFS repo is mounted"
+nfs_mount_fail_msg: "NFS repo is not mounted"
+check_iso_path: '/var/nfs_repo/unattended_centos7.iso'
+check_iso_success_msg: "ISO is present in the NFS repo"
+check_iso_fail_msg: "ISO is not present in the NFS repo"
+
+pod_service_check_fail_msg: "Service is not running"
+pod_service_check_success_msg: "Service is up and running"
+network_config_pod_success_msg: "Network-Config Pod is running"
+network_config_pod_fail_msg: "Network-Config Pod is not running"
+awx_pod_success_msg: "awx pod is up and running."
+awx_pod_fail_msg: "awx pod is not running"
+pvc_stat_success_msg: "pvc stat is running"
+pvc_stat_fail_msg: "pvc stat is not running"
+svc_stat_success_msg: "svc stat is running"
+svc_stat_fail_msg: "svc stat is not running"
+svc_conn_success_msg: "Connection to svc is successful at"
+svc_conn_fail_msg: "Connection to svc failed at: "
+cobbler_pod_success_msg: "Cobbler service is running"
+cobbler_pod_fail_msg: "Cobbler service is not running"
+subnet_manager_success_msg: "Subnet Manager is running"
+subnet_manager_fail_msg: "Subnet Manager is not running"
+cron_jobs_success_msg: "Cron jobs are running"
+cron_jobs_fail_msg: "Cron jobs are not running"
+container_info: "CONTAINER_RUNNING"
+ethernet_true: "ethernet_switch_support: true"
+ethernet_false: "ethernet_switch_support: false"
+powervault_true: "powervault_support: true"
+powervault_false: "powervault_support: false"
+infiniband_true: "ib_switch_support: true"
+infiniband_false: "ib_switch_support: false"
+# Update these with valid Docker credentials before running the tests
+docker_user: "User"
+docker_password: "Password"
+valid_docker_creds: "Credentials are valid"
+invalid_docker_creds: "Please input valid docker username and password in test_control_plane_vars.yml"

+ 56 - 0
control_plane/test/test_vars/test_ethernet_vars.yml

@@ -0,0 +1,56 @@
+#  Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+---
+# Usage : test_ethernet_facts.yml
+failed_msg: "Unexpected scenario"
+success_message: "Execution successful"
+eth_inventory_name: "ethernet_inventory"
+eth_host_name: "100.96.23.241"
+fact_template_value: "ethernet_template"
+fact_job_name: "ethernet_template"
+eth_facts_playbook_path: "control_plane/tools/ethernet_facts.yml"
+awx_script_path: "test_prepare.yml"
+random_ip: 100.100.100.100
+invalid_username: "invalid_username"
+username: admin
+password: admin
+
+# Usage : test_ethernet_config.yml
+ethernet_dir: "ethernet.yml"
+ethernet_config_dir: "../input_params/ethernet_vars.yml"
+ethernet_config_backup_dir: "ethernet_config_backup.yml"
+get_mtu_dir: "test_eth_mtu.yml"
+appliance_dir: "/root/ethernet/control_plane"
+fail_case: "Expected error, please check the configurations"
+sed_condition: '/Port 4/a mtu2345'
+eth_template_value: "ethernet_template"
+eth_job_name: "ethernet_template"
+eth_playbook_path: "control_plane/ethernet.yml"
+inventory_dir: "test_ethernet_inventory"
+login_vars_path: "../input_params/login_vars.yml"
+login_vars_vault_path: "../input_params/.login_vault_key"
+tower_config_file_path: "../roles/webui_awx/files/.tower_cli.cfg"
+tower_vault_file_path: "../roles/webui_awx/files/.tower_vault_key"
+file_perm: '0644'
+
+# Usage : test_eth_mtu.yml, test_ethernet_config.yml
+validation_port: 1/1/4:1
+port_num: 4
+search_line: "    ethernet 1/1/5:"
+add_mtu_line: "      mtu: 2345"
+time_to_pause: 4
+validate_mtu_line: "MTU 2345 bytes"
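+# A sketch of how the MTU test vars fit together: lineinfile inserts add_mtu_line just
+# before search_line, i.e. at the end of the "ethernet 1/1/4" interface block in
+# ethernet_vars.yml, and validate_mtu_line is the string that
+# "show interface ethernet 1/1/4:1" is expected to print once the MTU has been applied.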
+ethernet_success_msg: "successful"
+ethernet_fail_msg: "failed"

+ 3 - 0
examples/host_mapping_file_one_touch.csv

@@ -0,0 +1,3 @@
+MAC,Hostname,IP,Component_role
+xx:yy:zz:aa:bb,server,1.2.3.4,manager
+aa:bb:cc:dd:ee,server2,10.10.11.12,nfs_node

+ 3 - 0
examples/host_mapping_file_os_provisioning.csv

@@ -0,0 +1,3 @@
+MAC,Hostname,IP
+xx:yy:zz:aa:bb,server,1.2.3.4
+aa:bb:cc:dd:ee,server2,10.10.11.12

+ 2 - 0
examples/mapping_device_file.csv

@@ -0,0 +1,2 @@
+MAC,IP
+xx:yy:zz:aa:bb,1.2.3.4

+ 0 - 2
examples/mapping_file.csv

@@ -1,2 +0,0 @@
-MAC,Hostname,IP
-xx:yy:zz:aa:bb,server,1.2.3.4