
Merge branch 'devel' of https://github.com/dellhpc/omnia into issue-116

Luke Wilson, 4 years ago
Parent
Current commit
0c1fcd21b4
100 files changed, with 6486 insertions and 1574 deletions
  1. + 5 - 5  .github/workflows/ansible-lint.yml
  2. + 42 - 0  appliance/appliance_config.yml
  3. + 4 - 8  appliance/inventory.yml
  4. + 19 - 3  appliance/roles/common/tasks/docker_installation.yml
  5. + 6 - 0  appliance/roles/common/tasks/main.yml
  6. + 1 - 1  appliance/roles/common/tasks/package_installation.yml
  7. + 304 - 0  appliance/roles/common/tasks/password_config.yml
  8. + 3 - 3  appliance/roles/common/tasks/pre_requisite.yml
  9. + 42 - 8  appliance/roles/common/vars/main.yml
  10. + 47 - 0  appliance/roles/inventory/files/add_host.yml
  11. + 135 - 0  appliance/roles/inventory/files/create_inventory.yml
  12. + 95 - 0  appliance/roles/inventory/tasks/main.yml
  13. + 16 - 0  appliance/roles/inventory/vars/main.yml
  14. + 0 - 1  appliance/roles/provision/files/.users.digest
  15. + 8 - 10  appliance/roles/provision/files/Dockerfile
  16. + 4 - 4  appliance/roles/provision/files/settings
  17. + 0 - 20  appliance/roles/provision/files/dnsmasq.template
  18. + 0 - 19  appliance/roles/provision/files/first-sync.sh
  19. + 0 - 18  appliance/roles/provision/files/ifcfg-eno1
  20. + 43 - 0  appliance/roles/provision/files/inventory_creation.yml
  21. + 64 - 12  appliance/roles/provision/files/kickstart.yml
  22. + 27 - 0  appliance/roles/provision/files/start_cobbler.yml
  23. + 64 - 0  appliance/roles/provision/files/temp_centos7.ks
  24. + 0 - 51  appliance/roles/provision/files/temp_centos8.ks
  25. + 9 - 8  appliance/roles/provision/files/dhcp.template
  26. + 46 - 0  appliance/roles/provision/files/tftp.yml
  27. + 40 - 3  appliance/roles/provision/tasks/check_prerequisites.yml
  28. + 35 - 4  appliance/roles/provision/tasks/configure_cobbler.yml
  29. + 60 - 0  appliance/roles/provision/tasks/dhcp_configure.yml
  30. + 1 - 1  appliance/roles/provision/tasks/firewall_settings.yml
  31. + 15 - 10  appliance/roles/provision/tasks/main.yml
  32. + 84 - 0  appliance/roles/provision/tasks/mapping_file.yml
  33. + 3 - 3  appliance/roles/provision/tasks/mount_iso.yml
  34. + 25 - 82  appliance/roles/provision/tasks/provision_password.yml
  35. + 13 - 21  appliance/roles/provision/vars/main.yml
  36. + 164 - 33  appliance/roles/web_ui/files/awx_configuration.yml
  37. + 0 - 118  appliance/roles/web_ui/tasks/awx_password.yml
  38. + 0 - 8  appliance/roles/web_ui/tasks/check_prerequisites.yml
  39. + 22 - 0  appliance/roles/web_ui/tasks/clone_awx.yml
  40. + 27 - 8  appliance/roles/web_ui/tasks/install_awx.yml
  41. + 4 - 2  appliance/roles/web_ui/tasks/install_awx_cli.yml
  42. + 14 - 8  appliance/roles/web_ui/tasks/main.yml
  43. + 85 - 0  appliance/roles/web_ui/tasks/ui_accessibility.yml
  44. + 14 - 19  appliance/roles/web_ui/vars/main.yml
  45. + 42 - 0  appliance/test/appliance_config_empty.yml
  46. + 42 - 0  appliance/test/appliance_config_test.yml
  47. + 0 - 3  appliance/test/cobbler_inventory
  48. + 3 - 0  appliance/test/provisioned_hosts.yml
  49. + 1661 - 15  appliance/test/test_common.yml
  50. + 891 - 0  appliance/test/test_omnia.yml
  51. + 365 - 67  appliance/test/test_provision_cc.yml
  52. + 75 - 350  appliance/test/test_provision_cdip.yml
  53. + 90 - 41  appliance/test/test_provision_ndod.yml
  54. + 22 - 11  appliance/test/test_vars/test_common_vars.yml
  55. + 24 - 0  appliance/test/test_vars/test_omnia_vars.yml
  56. + 22 - 9  appliance/test/test_vars/test_provision_vars.yml
  57. + 8 - 12  appliance/test/test_vars/test_web_ui_vars.yml
  58. + 281 - 169  appliance/test/test_web_ui.yml
  59. + 0 - 212  appliance/test/test_web_ui_awxc.yml
  60. + 40 - 0  appliance/tools/passwordless_ssh.yml
  61. + 25 - 0  appliance/tools/provision_host_report.j2
  62. + 83 - 0  appliance/tools/provision_report.yml
  63. + 36 - 0  appliance/tools/roles/cluster_preperation/tasks/main.yml
  64. + 78 - 0  appliance/tools/roles/cluster_preperation/tasks/passwordless_ssh.yml
  65. + 19 - 0  appliance/tools/roles/cluster_preperation/vars/main.yml
  66. + 42 - 0  appliance/tools/roles/fetch_password/tasks/main.yml
  67. + 97 - 0  docs/INSTALL_OMNIA.md
  68. + 174 - 0  docs/INSTALL_OMNIA_APPLIANCE.md
  69. + 6 - 0  docs/INVENTORY
  70. + 81 - 0  docs/MONITOR_CLUSTERS.md
  71. + 28 - 0  docs/PREINSTALL_OMNIA.md
  72. + 37 - 0  docs/PREINSTALL_OMNIA_APPLIANCE.md
  73. + 51 - 7  docs/README.md
  74. + 55 - 18  omnia.yml
  75. + 24 - 0  omnia_config.yml
  76. + 17 - 13  platforms/roles/kubeflow/tasks/main.yml
  77. + 0 - 22  platforms/roles/kubeflow/vars/main.yml
  78. + 87 - 0  roles/cluster_validation/tasks/fetch_password.yml
  79. + 19 - 0  roles/cluster_validation/tasks/main.yml
  80. + 30 - 0  roles/cluster_validation/tasks/validations.yml
  81. + 32 - 0  roles/cluster_validation/vars/main.yml
  82. + 1 - 1  roles/compute_gpu/files/daemon.json
  83. + 20 - 0  roles/common/files/inventory.fact
  84. + 17 - 12  roles/common/handlers/main.yml
  85. + 35 - 0  roles/common/tasks/amd.yml
  86. + 21 - 45  roles/common/tasks/main.yml
  87. + 25 - 25  roles/common/tasks/ntp.yml
  88. + 37 - 17  roles/compute_gpu/tasks/main.yml
  89. + 1 - 2  roles/common/templates/chrony.conf.j2
  90. + 1 - 3  roles/common/templates/ntp.conf.j2
  91. + 21 - 12  roles/common/vars/main.yml
  92. + 0 - 3  roles/compute_gpu/files/k8s.conf
  93. + 0 - 8  roles/compute_gpu/files/kubernetes.repo
  94. + 0 - 0  roles/k8s_common/files/k8s.conf
  95. + 0 - 0  roles/k8s_common/files/kubernetes.repo
  96. + 28 - 0  roles/k8s_common/handlers/main.yml
  97. + 69 - 0  roles/k8s_common/tasks/main.yml
  98. + 27 - 0  roles/k8s_common/vars/main.yml
  99. + 6 - 6  roles/firewalld/tasks/main.yml
  100. + 0 - 0  roles/firewalld/vars/main.yml

+ 5 - 5
.github/workflows/ansible-lint.yml

@@ -17,7 +17,7 @@ jobs:
 
     - name: ansible-lint 
       # replace "master" with any valid ref
-      uses: ansible/ansible-lint-action@151b9a2
+      uses: ansible/ansible-lint-action@master
       with:
         # [required]
         # Paths to ansible files (i.e., playbooks, tasks, handlers etc..)
@@ -28,10 +28,10 @@ jobs:
         #   playbook_1.yml
         #   playbook_2.yml
         targets: |
-          /github/workspace/kubernetes/jupyterhub.yml
-          /github/workspace/kubernetes/kubeflow.yml
-          /github/workspace/kubernetes/kubernetes.yml
-          /github/workspace/slurm/slurm.yml
+          /github/workspace/omnia.yml
+          /github/workspace/omnia_config.yml
+          /github/workspace/platforms/jupyterhub.yml
+          /github/workspace/platforms/kubeflow.yml
           /github/workspace/tools/install_tools.yml
         # [optional]
         # Arguments to override a package and its version to be set explicitly.

+ 42 - 0
appliance/appliance_config.yml

@@ -0,0 +1,42 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Password used while deploying OS on bare metal servers and for Cobbler UI.
+# The Length of the password should be at least 8.
+# The password must not contain -,\, ',"
+provision_password: ""
+
+# Password used for the AWX UI.
+# The Length of the password should be at least 8.
+# The password must not contain -,\, ',"
+awx_password: ""
+
+# The nic/ethernet card that needs to be connected to the HPC switch.
+# This nic will be configured by Omnia for the DHCP server.
+# Default value of nic is em1.
+hpc_nic: "em1"
+
+# The nic/ethernet card that will be connected to the public internet.
+# Default value of nic is em2
+public_nic: "em2"
+
+# The mapping file consists of the MAC address and its respective IP address and hostname.
+# If user wants to provide a mapping file, set this value to "true"
+# The format of mapping file should be MAC,hostname,IP and must be a CSV file.
+mapping_file_exists: ""
+
+# The dhcp range for assigning the IP address to the baremetal nodes.
+dhcp_start_ip_range: ""
+dhcp_end_ip_range: ""
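
Note: the rules described in these comments (passwords of at least 8 characters with no -, \, ' or ", non-empty NIC names, and a DHCP start/end range) are enforced later by the password_config.yml tasks added in this commit. As a minimal sketch only, not part of the commit, the same checks could be written in plain Python; `validate_appliance_config` is a hypothetical helper and the field names mirror appliance_config.yml:

```python
# Hypothetical sketch (not part of this commit) of the checks these comments describe;
# password_config.yml enforces the same rules with assert tasks further below.
FORBIDDEN_CHARS = {"-", "\\", "'", '"'}

def validate_appliance_config(cfg: dict) -> list:
    """Return human-readable validation errors; an empty list means the config passes."""
    errors = []
    for key in ("provision_password", "awx_password"):
        password = cfg.get(key, "")
        if len(password) < 8:
            errors.append(f"{key}: length should be at least 8")
        if any(ch in FORBIDDEN_CHARS for ch in password):
            errors.append(f"{key}: must not contain - \\ ' \"")
    for key in ("hpc_nic", "public_nic", "dhcp_start_ip_range", "dhcp_end_ip_range"):
        if not cfg.get(key):
            errors.append(f"{key}: must not be empty")
    if cfg.get("hpc_nic") and cfg.get("hpc_nic") == cfg.get("public_nic"):
        errors.append("hpc_nic and public_nic should be different interfaces")
    return errors

print(validate_appliance_config({"provision_password": "short", "hpc_nic": "em1"}))
```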

+ 4 - 8
appliance/inventory.yml

@@ -1,4 +1,4 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,13 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-
-# inventory playbook. Will be updated later
-- name: omnia
+- name: Dynamic Inventory
   hosts: localhost
   connection: local
   gather_facts: no
-  tasks:
-    - name: Hello
-      debug:
-        msg: "Hello inventory.yml"
+  roles:
+    - inventory

+ 19 - 3
appliance/roles/common/tasks/docker_installation.yml

@@ -30,8 +30,8 @@
 
 - name: Install docker
   package:
-    name: "{{ container_repo_install }}" 
-    state: latest
+    name: "{{ container_repo_install }}"
+    state: present
   become: yes
   tags: install
 
@@ -43,6 +43,22 @@
   become: yes
   tags: install
 
+- name: Uninstall docker-py using pip
+  pip:
+    name: ['docker-py','docker']
+    state: absent
+  tags: install
+
+- name: Install docker using pip
+  pip:
+    name: docker
+    state: present
+  tags: install
+
+- name: Update pip
+  command: pip3 install --upgrade pip
+  changed_when: false
+
 - name: Installation using python3
   pip:
     name: "{{ docker_compose }}"
@@ -57,5 +73,5 @@
 
 - name: Restart docker
   service:
-    name: docker 
+    name: docker
     state: restarted

+ 6 - 0
appliance/roles/common/tasks/main.yml

@@ -12,6 +12,9 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 ---
+- name: Mount Path
+  set_fact:
+    mount_path: "{{ role_path + '/../../..'  }}"
 
 - name: Pre-requisite validation
   import_tasks: pre_requisite.yml
@@ -22,6 +25,9 @@
 - name: Common packages installation
   import_tasks: package_installation.yml
 
+- name: Basic Configuration
+  import_tasks: password_config.yml
+
 - name: Docker installation and configuration
   import_tasks: docker_installation.yml
 

+ 1 - 1
appliance/roles/common/tasks/package_installation.yml

@@ -16,5 +16,5 @@
 - name: Install packages
   package:
     name: "{{ common_packages }}"
-    state: latest
+    state: present
   tags: install

+ 304 - 0
appliance/roles/common/tasks/password_config.yml

@@ -0,0 +1,304 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Check input config file is encrypted
+  command: cat {{ input_config_filename }}
+  changed_when: false
+  register: config_content
+
+- name: Decrpyt appliance_config.yml
+  command: >-
+    ansible-vault decrypt {{ input_config_filename }}
+    --vault-password-file {{ vault_filename }}
+  changed_when: false
+  when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+
+- name: Include variable file appliance_config.yml
+  include_vars: "{{ input_config_filename }}"
+  no_log: true
+
+- name: Validate input parameters are not empty
+  fail:
+    msg: "{{ input_config_failure_msg }}"
+  register: input_config_check
+  when:
+    - provision_password | length < 1 or
+      awx_password | length < 1 or
+      hpc_nic | length < 1 or
+      public_nic | length < 1 or
+      dhcp_start_ip_range | length < 1 or
+      dhcp_end_ip_range | length < 1
+
+- name: Save input variables from file
+  set_fact:
+    cobbler_password: "{{ provision_password }}"
+    admin_password: "{{ awx_password }}"
+    nic:  "{{ hpc_nic }}"
+    internet_nic: "{{ public_nic }}"
+    dhcp_start_ip: "{{ dhcp_start_ip_range | ipv4 }}"
+    dhcp_end_ip: "{{ dhcp_end_ip_range | ipv4 }}"
+    mapping_file: "{{ mapping_file_exists }}"
+  no_log: true
+
+- name: Get the system hpc ip
+  shell:  "ifconfig {{ hpc_nic }} | grep 'inet' |cut -d: -f2 |  awk '{ print $2}'"
+  register: ip
+  changed_when: false
+
+- name: Get the system public ip
+  shell:  "ifconfig {{ internet_nic }} | grep 'inet' |cut -d: -f2 |  awk '{ print $2}'"
+  register: internet_ip
+  changed_when: false
+
+- name: Get the system netmask
+  shell:  "ifconfig {{ hpc_nic }} | grep 'inet' |cut -d: -f2 |  awk '{ print $4}'"
+  register: net
+  changed_when: false
+
+- name: HPC nic IP
+  set_fact:
+    hpc_ip: "{{ ip.stdout }}"
+    public_ip: "{{ internet_ip.stdout }}"
+
+- name:  Netmask
+  set_fact:
+    netmask: "{{ net.stdout }}"
+
+- name: shell try
+  shell: |
+    IFS=. read -r i1 i2 i3 i4 <<< "{{ hpc_ip }}"
+    IFS=. read -r m1 m2 m3 m4 <<< "{{ netmask }}"
+    printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))"
+  register: sub_result
+  changed_when: false
+
+- name: Subnet
+  set_fact:
+    subnet: "{{ sub_result.stdout }}"
+
+- name: Assert provision_password
+  assert:
+    that:
+      - cobbler_password | length > min_length | int - 1
+      - cobbler_password | length < max_length | int + 1
+      - '"-" not in cobbler_password '
+      - '"\\" not in cobbler_password '
+      - '"\"" not in cobbler_password '
+      - " \"'\" not in cobbler_password "
+    success_msg: "{{ success_msg_provision_password }}"
+    fail_msg: "{{ fail_msg_provision_password }}"
+  no_log: true
+  register: cobbler_password_check
+
+- name: Assert awx_password
+  assert:
+    that:
+        - admin_password | length > min_length | int - 1
+        - admin_password | length < max_length | int + 1
+        - '"-" not in admin_password '
+        - '"\\" not in admin_password '
+        - '"\"" not in admin_password '
+        - " \"'\" not in admin_password "
+    success_msg: "{{ success_msg_awx_password }}"
+    fail_msg: "{{ fail_msg_awx_password }}"
+  no_log: true
+  register: awx_password_check
+
+- name: Assert hpc_ip
+  assert:
+    that:
+      - hpc_ip | length > 7
+    success_msg: "{{ success_hpc_ip }}"
+    fail_msg: "{{ fail_hpc_ip }}"
+  register: hpc_ip_check
+
+- name: Assert public_ip
+  assert:
+    that:
+      - public_ip | length > 7
+    success_msg: "{{ success_hpc_ip }}"
+    fail_msg: "{{ fail_hpc_ip }}"
+  register: public_ip_check
+
+- name: Assert hpc_nic
+  assert:
+    that:
+      - nic | length > nic_min_length | int - 1
+      - nic != internet_nic
+    success_msg: "{{ success_msg_hpc_nic }}"
+    fail_msg: "{{ fail_msg_hpc_nic }}"
+  register: hpc_nic_check
+
+- name: Assert public_nic
+  assert:
+    that:
+      - internet_nic | length > nic_min_length | int - 1
+      - nic != internet_nic
+    success_msg: "{{ success_msg_public_nic }}"
+    fail_msg: "{{ fail_msg_public_nic }}"
+  register: public_nic_check
+
+- name: Assert mapping_file_exists
+  assert:
+    that:
+      - "( mapping_file == true) or ( mapping_file == false)"
+    success_msg: "{{ success_mapping_file }}"
+    fail_msg: "{{ fail_mapping_file }}"
+  register: mapping_file_check
+
+- name: Check the subnet of dhcp start range
+  shell: |
+    IFS=. read -r i1 i2 i3 i4 <<< "{{ dhcp_start_ip }}"
+    IFS=. read -r m1 m2 m3 m4 <<< "{{ netmask }}"
+    printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))"
+  args:
+    warn: no
+  register: dhcp_start_sub_result
+  changed_when: false
+  when: dhcp_start_ip != "false"
+
+- name: Set the start dhcp subnet
+  set_fact:
+    dhcp_start_sub: "{{ dhcp_start_sub_result.stdout }}"
+  when: dhcp_start_ip != "false"
+
+- name: Check the subnet of dhcp end range
+  shell: |
+    IFS=. read -r i1 i2 i3 i4 <<< "{{ dhcp_end_ip }}"
+    IFS=. read -r m1 m2 m3 m4 <<< "{{ netmask }}"
+    printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))"
+  register: dhcp_end_sub_result
+  when: dhcp_end_ip != "false"
+  changed_when: false
+
+- name: Set the end dhcp subnet
+  set_fact:
+    dhcp_end_sub: "{{ dhcp_end_sub_result.stdout }}"
+  when: dhcp_end_ip != "false"
+
+- name: Assert dhcp_start_ip_range
+  assert:
+    that:
+      - dhcp_start_ip != "false"
+      - dhcp_start_ip != dhcp_end_ip
+      - dhcp_start_sub == subnet
+      - dhcp_start_sub == dhcp_end_sub
+    success_msg: "{{ success_dhcp_range }}"
+    fail_msg: "{{ fail_dhcp_range }}"
+  register: dhcp_start_ip_check
+
+- name: Assert dhcp_end_ip_range
+  assert:
+    that:
+      - dhcp_end_ip != "false"
+      - dhcp_start_ip != dhcp_end_ip
+      - dhcp_end_sub == subnet
+      - dhcp_start_sub == dhcp_end_sub
+    success_msg: "{{ success_dhcp_range }}"
+    fail_msg: "{{ fail_dhcp_range }}"
+  register: dhcp_end_ip_check
+
+- name: Create ansible vault key
+  set_fact:
+    vault_key: "{{ lookup('password', '/dev/null chars=ascii_letters') }}"
+  when: "'$ANSIBLE_VAULT;' not in config_content.stdout"
+
+- name: Save vault key
+  copy:
+    dest: "{{ vault_filename }}"
+    content: |
+      {{ vault_key }}
+    owner: root
+    force: yes
+  when: "'$ANSIBLE_VAULT;' not in config_content.stdout"
+
+- name: Encrypt input config file
+  command: >-
+    ansible-vault encrypt {{ input_config_filename }}
+    --vault-password-file {{ vault_filename }}
+  changed_when: false
+
+- name: Check if omnia_vault_key exists
+  stat:
+    path: "{{ role_path }}/../../../{{ config_vaultname }}"
+  register: vault_key_result
+
+- name: Create ansible vault key if it does not exist
+  set_fact:
+    vault_key: "{{ lookup('password', '/dev/null chars=ascii_letters') }}"
+  when: not vault_key_result.stat.exists
+
+- name: Save vault key
+  copy:
+    dest: "{{ role_path }}/../../../{{ config_vaultname }}"
+    content: |
+      {{ vault_key }}
+    owner: root
+    force: yes
+  when: not vault_key_result.stat.exists
+
+- name: Check if omnia config file is encrypted
+  command: cat {{ role_path }}/../../../{{ config_filename }}
+  changed_when: false
+  register: config_content
+  no_log: True
+
+- name: Decrpyt omnia_config.yml
+  command: >-
+    ansible-vault decrypt {{ role_path }}/../../../{{ config_filename }}
+    --vault-password-file {{ role_path }}/../../../{{ config_vaultname }}
+  when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+
+- name: Include variable file omnia_config.yml
+  include_vars: "{{ role_path }}/../../../{{ config_filename }}"
+  no_log: True
+
+- name: Validate input parameters are not empty
+  fail:
+    msg: "{{ input_config_failure_msg }}"
+  register: input_config_check
+  when:
+    - mariadb_password | length < 1 or
+      k8s_cni | length < 1
+
+- name: Assert mariadb_password
+  assert:
+    that:
+        - mariadb_password | length > min_length | int - 1
+        - mariadb_password | length < max_length | int + 1
+        - '"-" not in mariadb_password '
+        - '"\\" not in mariadb_password '
+        - '"\"" not in mariadb_password '
+        - " \"'\" not in mariadb_password "
+    success_msg: "{{ success_msg_mariadb_password }}"
+    fail_msg: "{{ fail_msg_mariadb_password }}"
+
+- name: Assert kubernetes cni
+  assert:
+    that: "('calico' in k8s_cni) or ('flannel' in k8s_cni)"
+    success_msg: "{{ success_msg_k8s_cni }}"
+    fail_msg: "{{ fail_msg_k8s_cni }}"
+
+- name: Save input variables from file
+  set_fact:
+    db_password: "{{ mariadb_password }}"
+    k8s_cni: "{{ k8s_cni }}"
+  no_log: True
+
+- name: Encrypt input config file
+  command: >-
+    ansible-vault encrypt {{ role_path }}/../../../{{ config_filename }}
+    --vault-password-file {{ role_path }}/../../../{{ config_vaultname }}
+  changed_when: false
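
Note: the shell snippets above derive the network address by AND-ing each octet of the NIC's IP with the netmask, and the DHCP start/end asserts then compare their subnets against it. For illustration only (not part of this commit), Python's standard `ipaddress` module computes the same value; the 172.17.0.x addresses are examples taken from the templates elsewhere in this commit:

```python
# Illustration only: the same subnet math the shell tasks do with IFS/printf.
import ipaddress

def network_address(ip: str, netmask: str) -> str:
    """Bitwise-AND of address and netmask, e.g. 172.17.0.50 / 255.255.0.0 -> 172.17.0.0."""
    iface = ipaddress.ip_interface(f"{ip}/{netmask}")
    return str(iface.network.network_address)

hpc_subnet = network_address("172.17.0.50", "255.255.0.0")
dhcp_start_subnet = network_address("172.17.0.10", "255.255.0.0")
dhcp_end_subnet = network_address("172.17.0.254", "255.255.0.0")

# Mirrors the dhcp_start_ip_range / dhcp_end_ip_range asserts above.
assert dhcp_start_subnet == hpc_subnet == dhcp_end_subnet
print(hpc_subnet)  # 172.17.0.0
```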

+ 3 - 3
appliance/roles/common/tasks/pre_requisite.yml

@@ -20,8 +20,8 @@
     replace: 'log_path = /var/log/omnia.log'
   tags: install
 
-- name: Check OS support 
-  fail: 
+- name: Check OS support
+  fail:
     msg: "{{ os_status }}"
   when: not(ansible_distribution == os_name and ansible_distribution_version >= os_version)
   register: os_value
@@ -33,7 +33,7 @@
   tags: install
 
 - name: Status of SElinux
-  fail: 
+  fail:
     msg: "{{ selinux_status }}"
   when: ansible_selinux.status != 'disabled'
   register: selinux_value

+ 42 - 8
appliance/roles/common/vars/main.yml

@@ -15,7 +15,7 @@
 
 # vars file for common
 
-# Usage: tasks/package_installation.yml
+# Usage: package_installation.yml
 common_packages:
   - epel-release
   - yum-utils
@@ -25,23 +25,29 @@ common_packages:
   - nodejs
   - device-mapper-persistent-data
   - bzip2
+  - python2-pip
   - python3-pip
   - nano
   - lvm2
   - gettext
+  - python-docker
+  - net-tools
+  - python-netaddr
 
-# Usage: tasks/pre_requisite.yml
+# Usage: pre_requisite.yml
 internet_delay: 0
 internet_timeout: 1
 hostname: github.com
 port_no: 22
 os_name: CentOS
-os_version: '8' 
-internet_status: "Failed:No Internet connection.Connect to Internet."
-os_status: "Unsupported OS or OS version.OS must be {{ os_name }} and Version must be {{ os_version }} or more"
+os_version: '7.9' 
+internet_status: "Failed. No Internet connection. Make sure network is up."
+os_status: "Unsupported OS or OS version. OS should be {{ os_name }} and Version should be {{ os_version }} or more"
 selinux_status: "SElinux is not disabled. Disable it in /etc/sysconfig/selinux and reboot the system"
+iso_name: CentOS-7-x86_64-Minimal-2009.iso
+iso_fail: "Iso file not found. Download and copy the iso file to omnia/appliance/roles/provision/files"
 
-# Usage: tasks/docker_installation.yml
+# Usage: docker_installation.yml
 docker_repo_url: https://download.docker.com/linux/centos/docker-ce.repo
 docker_repo_dest: /etc/yum.repos.d/docker-ce.repo
 success: '0'
@@ -50,5 +56,33 @@ container_repo_install: docker-ce
 docker_compose: docker-compose
 daemon_dest: /etc/docker/
 
-# Usage: tasks/docker_volume.yml
-docker_volume_name: omnia-storage
+# Usage: docker_volume.yml
+docker_volume_name: omnia-storage
+
+# Usage: password_config.yml
+input_config_filename: "appliance_config.yml"
+fail_msg_provision_password: "Failed. Incorrect provision_password format provided in appliance_config.yml file"
+success_msg_provision_password: "provision_password validated"
+fail_msg_awx_password: "Failed. Incorrect awx_password format provided in appliance_config.yml file"
+success_msg_awx_password: "awx_password validated"
+fail_msg_hpc_nic: "Failed. Incorrect hpc_nic format provided in appliance_config.yml file"
+success_msg_hpc_nic: "hpc_nic validated"
+fail_msg_public_nic: "Failed. Incorrect public_nic format provided in appliance_config.yml file"
+success_msg_public_nic: "public_nic validated"
+success_mapping_file: "mapping_file_exists validated"
+fail_mapping_file: "Failed. Incorrect mapping_file_exists value in appliance_config.yml. It should be either true or false"
+input_config_failure_msg: "Please provide all the required parameters in appliance_config.yml"
+success_dhcp_range: "Dhcp_range validated"
+fail_dhcp_range: "Failed. Incorrect range assigned for dhcp"
+success_hpc_ip: "IP validated"
+fail_hpc_ip: "Failed. Nic should be configured"
+min_length: 8
+max_length: 30
+nic_min_length: 3
+vault_filename: .vault_key
+config_filename: "omnia_config.yml"
+config_vaultname: .omnia_vault_key
+fail_msg_mariadb_password: "Failed. Incorrect mariadb_password format provided in omnia_config.yml file"
+success_msg_mariadb_password: "mariadb_password validated"
+success_msg_k8s_cni: "Kubernetes CNI Validated"
+fail_msg_k8s_cni: "Failed. Kubernetes CNI is incorrect in omnia_config.yml"

+ 47 - 0
appliance/roles/inventory/files/add_host.yml

@@ -0,0 +1,47 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Check if host already exists
+  command: awk "{{ '/'+ item + '/' }}" /root/inventory
+  register: check_host
+  changed_when: no
+
+- name: Initialise host description
+  set_fact:
+    host_description: "Description Unavailable"
+
+- name: Fetch description
+  set_fact:
+    host_description: "CPU:{{ hostvars[item]['ansible_processor_count'] }}
+    Cores:{{ hostvars[item]['ansible_processor_cores'] }}
+    Memory:{{ hostvars[item]['ansible_memtotal_mb'] }}MB
+    BIOS:{{ hostvars[item]['ansible_bios_version'] }}"
+  when: not check_host.stdout | regex_search(item)
+  ignore_errors: yes
+
+- name: Add host
+  lineinfile:
+    path:  "/root/inventory"
+    line: "    {{ item }}:\n      _awx_description: {{ host_description }}"
+  when:
+    - not check_host.stdout | regex_search(item)
+    - host_description != "Description Unavailable"
+
+- name: Host added msg
+  debug:
+    msg: "{{ host_added_msg + item }}"
+  when:
+    - not check_host.stdout | regex_search(item)
+    - host_description != "Description Unavailable"
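
Note: the tasks above append each reachable host to /root/inventory together with a description built from gathered facts (CPU count, cores, memory, BIOS). A rough Python equivalent of that formatting step, assuming a facts dict shaped like Ansible's `hostvars` entry for the node (illustration only, with made-up fact values):

```python
# Illustration only: formatting the _awx_description line that add_host.yml writes.
def host_entry(host: str, facts: dict) -> str:
    description = (
        f"CPU:{facts['ansible_processor_count']} "
        f"Cores:{facts['ansible_processor_cores']} "
        f"Memory:{facts['ansible_memtotal_mb']}MB "
        f"BIOS:{facts['ansible_bios_version']}"
    )
    # Matches the indentation used under "all:" -> "hosts:" in /root/inventory.
    return f"    {host}:\n      _awx_description: {description}\n"

facts = {
    "ansible_processor_count": 2,
    "ansible_processor_cores": 16,
    "ansible_memtotal_mb": 128000,
    "ansible_bios_version": "2.8.2",
}
print(host_entry("172.17.0.20", facts), end="")
```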

+ 135 - 0
appliance/roles/inventory/files/create_inventory.yml

@@ -0,0 +1,135 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Find reachable hosts
+  hosts: all
+  gather_facts: false
+  ignore_unreachable: true
+  ignore_errors: true
+  tasks:
+    - name: Check for reachable nodes
+      command: ping -c1 {{ inventory_hostname }}
+      delegate_to: localhost
+      register: ping_result
+      ignore_errors: yes
+      changed_when: false
+
+    - name: Refresh ssh keys
+      command: ssh-keygen -R {{ inventory_hostname }}
+      delegate_to: localhost
+      changed_when: false
+
+    - name: Group reachable hosts
+      group_by:
+        key: "reachable"
+      when: "'100% packet loss' not in ping_result.stdout"
+
+- name: Get provision password
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+    - name: Include vars file of inventory role
+      include_vars: ../vars/main.yml
+
+- name: Set hostname on reachable nodes and gather facts
+  hosts: reachable
+  gather_facts: False
+  ignore_unreachable: true
+  remote_user: "{{ cobbler_username }}"
+  vars:
+    ansible_password: "{{ cobbler_password }}"
+    ansible_become_pass: "{{ cobbler_password }}"
+    ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+    mapping_file_present: ""
+  tasks:
+    - name: Setup
+      setup:
+       filter: ansible_*
+
+    - name: Check hostname of server
+      command: hostname
+      register: hostname_check
+      changed_when: false
+      ignore_errors: true
+
+    - name: Check if IP present in mapping file
+      command: grep "{{ inventory_hostname }}" ../../provision/files/new_mapping_file.csv
+      delegate_to: localhost
+      register: file_present
+      when: mapping_file | bool == true
+      ignore_errors: true
+
+    - name: Set fact if mapping file present
+      set_fact:
+        mapping_file_present: "{{ file_present.stdout }}"
+      when: mapping_file | bool == true
+      ignore_errors: true
+
+    - name: Get the static hostname from mapping file
+      shell: awk -F',' '$3 == "{{ inventory_hostname }}" { print $2 }' ../../provision/files/new_mapping_file.csv
+      delegate_to: localhost
+      when: ('localhost' in hostname_check.stdout) and (mapping_file_present != "" ) and ( mapping_file | bool == true )
+      register: host_name
+      ignore_errors: true
+
+    - name: Set the hostname from mapping file
+      hostname:
+        name: "{{ host_name.stdout }}"
+      register: result_host_name
+      when: ('localhost' in hostname_check.stdout) and (mapping_file_present != "" ) and  (mapping_file | bool == true )
+      ignore_errors: true
+
+    - name: Set the system hostname
+      hostname:
+        name: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+      register: result_name
+      when: ('localhost' in hostname_check.stdout) and (mapping_file | bool == false)
+      ignore_errors: true
+
+    - name: Add new hostname to /etc/hosts from mapping file
+      lineinfile:
+        dest: /etc/hosts
+        regexp: '^127\.0\.0\.1[ \t]+localhost'
+        line: "127.0.0.1 localhost {{ host_name.stdout }}"
+        state: present
+      when: ('localhost' in hostname_check.stdout) and ( mapping_file_present != "" ) and ( mapping_file | bool == true )
+      ignore_errors: true
+
+    - name: Add new hostname to /etc/hosts
+      lineinfile:
+        dest: /etc/hosts
+        regexp: '^127\.0\.0\.1[ \t]+localhost'
+        line: "127.0.0.1 localhost 'compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}'"
+        state: present
+      when: ('localhost' in hostname_check.stdout) and (mapping_file | bool == false )
+      ignore_errors: true
+
+- name: Update inventory
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+    - name: Update inventory file
+      block:
+        - name: Fetch facts and add new hosts
+          include_tasks: add_host.yml
+          with_items: "{{ groups['reachable'] }}"
+      when: "'reachable' in groups"
+
+    - name: Show unreachable hosts
+      debug:
+        msg: "{{ host_unreachable_msg }} + {{ groups['ungrouped'] }}"
+      when: "'ungrouped' in groups"
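
Note: when no mapping file is supplied, the play above derives a hostname from the last two octets of the node's IP (`compute<third>-<fourth>`); with a mapping file it looks the hostname up by IP instead. A small Python illustration of both paths, assuming the MAC,hostname,IP CSV layout described in appliance_config.yml (illustration only):

```python
# Illustration only: the two hostname paths used above.
import csv
from typing import Optional

def derived_hostname(ip: str) -> str:
    """compute<third-octet>-<fourth-octet>, e.g. 172.17.0.20 -> compute0-20."""
    octets = ip.split(".")
    return f"compute{octets[-2]}-{octets[-1]}"

def mapped_hostname(ip: str, mapping_csv: str) -> Optional[str]:
    """Look the hostname up by IP in a MAC,hostname,IP mapping file."""
    with open(mapping_csv, newline="") as fh:
        for row in csv.reader(fh):
            if len(row) >= 3 and row[2].strip() == ip:
                return row[1].strip()
    return None

print(derived_hostname("172.17.0.20"))  # compute0-20
```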

+ 95 - 0
appliance/roles/inventory/tasks/main.yml

@@ -0,0 +1,95 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Set Facts
+  set_fact:
+    ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+
+- name: Check if provisioned host file exists
+  stat:
+    path: "{{ role_path }}/files/provisioned_hosts.yml"
+  register: provisioned_file_result
+
+- name: Include vars file of common role
+  include_vars: "{{ role_path }}/../common/vars/main.yml"
+  no_log: True
+
+- name: Include vars file of web_ui role
+  include_vars: "{{ role_path }}/../web_ui/vars/main.yml"
+  no_log: True
+
+- name: Update inventory file
+  block:
+    - name: Check if input config file is encrypted
+      command: cat {{ input_config_filename }}
+      changed_when: false
+      register: config_content
+
+    - name: Decrpyt appliance_config.yml
+      command: >-
+        ansible-vault decrypt {{ input_config_filename }}
+        --vault-password-file {{ vault_filename }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+
+    - name: Include variable file appliance_config.yml
+      include_vars: "{{ input_config_filename }}"
+      no_log: True
+
+    - name: Save input variables from file
+      set_fact:
+        cobbler_password: "{{ provision_password }}"
+        mapping_file: "{{ mapping_file_exists }}"
+      no_log: True
+
+    - name: Encrypt input config file
+      command: >-
+        ansible-vault encrypt {{ input_config_filename }}
+        --vault-password-file {{ vault_filename }}
+      changed_when: false
+
+    - name: Check if inventory file already exists
+      stat:
+        path: "/root/inventory"
+      register: stat_result
+
+    - name: Create inventory file if doesnt exist
+      copy:
+        dest:  "/root/inventory"
+        content: |
+          ---
+          all:
+            hosts:
+        owner: root
+        mode: 0775
+      when: not stat_result.stat.exists
+
+    - name: Add inventory playbook
+      block:
+        - name: add hosts with description to inventory file
+          command: >-
+            ansible-playbook -i {{ role_path }}/files/provisioned_hosts.yml
+            {{ role_path }}/files/create_inventory.yml
+            --extra-vars "cobbler_username={{ cobbler_username }} cobbler_password={{ cobbler_password }} mapping_file={{ mapping_file | bool }}"
+          no_log: True
+          register: register_error
+      rescue:
+        - name: Fail if host addition was not successful
+          fail:
+            msg: "{{ register_error.stderr + register_error.stdout | regex_replace(cobbler_username) | regex_replace(cobbler_password) }}"
+
+  when: provisioned_file_result.stat.exists
+
+- name: push inventory to AWX
+  command: awx-manage inventory_import --inventory-name {{ omnia_inventory_name }} --source /root/inventory
+  when: provisioned_file_result.stat.exists
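
Note: if the nested `ansible-playbook` run fails, the rescue block above re-raises the combined stdout/stderr only after stripping the Cobbler username and password out of it with `regex_replace`. A rough Python equivalent of that scrubbing step (illustration only; Ansible's `regex_replace` treats the secret as a pattern, while this sketch escapes it and matches literally):

```python
# Illustration only: removing credentials from captured playbook output before showing it.
import re

def scrub(output: str, *secrets: str) -> str:
    for secret in secrets:
        if secret:
            output = re.sub(re.escape(secret), "", output)
    return output

log = "FAILED! ssh cobbler:MyS3cret@172.17.0.20 refused the connection"
print(scrub(log, "cobbler", "MyS3cret"))
```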

+ 16 - 0
appliance/roles/inventory/vars/main.yml

@@ -0,0 +1,16 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+host_added_msg: "Added host to inventory: "
+host_unreachable_msg: "Following hosts are unreachable: "

+ 0 - 1
appliance/roles/provision/files/.users.digest

@@ -1 +0,0 @@
-cobbler:Cobbler:

+ 8 - 10
appliance/roles/provision/files/Dockerfile

@@ -15,29 +15,30 @@ RUN yum install -y \
   cobbler-web \
   ansible \
   pykickstart \
+  cronie \
   debmirror \
   curl \
-  wget \
   rsync \
   httpd\
-  dhcp\
-  dnsmasq\
+  dhcp \
   xinetd \
   net-tools \
   memtest86+ \
   && yum clean all \
   &&  rm -rf /var/cache/yum
 
+RUN mkdir /root/omnia
+
 #Copy Configuration files
 COPY settings /etc/cobbler/settings
 COPY dhcp.template  /etc/cobbler/dhcp.template
-COPY dnsmasq.template /etc/cobbler/dnsmasq.template
 COPY modules.conf  /etc/cobbler/modules.conf
 COPY tftp /etc/xinetd.d/tftp
 COPY .users.digest /etc/cobbler/users.digest
 COPY kickstart.yml /root
-COPY centos8.ks /var/lib/cobbler/kickstarts
-COPY first-sync.sh /usr/local/bin/first-sync.sh
+COPY tftp.yml /root
+COPY inventory_creation.yml /root
+COPY centos7.ks /var/lib/cobbler/kickstarts
 
 EXPOSE 69 80 443 25151
 
@@ -46,8 +47,5 @@ VOLUME [ "/var/www/cobbler", "/var/lib/cobbler/backup", "/mnt" ]
 RUN systemctl enable cobblerd
 RUN systemctl enable httpd
 RUN systemctl enable rsyncd
-RUN systemctl enable dnsmasq
-
-#RUN ansible-playbook /root/kickstart.yml
 
-CMD ["sbin/init"]
+CMD ["sbin/init"]

+ 4 - 4
appliance/roles/provision/files/settings

@@ -98,7 +98,7 @@ default_ownership:
 # The simplest way to change the password is to run
 # openssl passwd -1
 # and put the output between the "" below.
-default_password_crypted: "$1$mF86/UHC$WvcIcX2t6crBz2onWxyac."
+default_password_crypted: "password"
 
 # the default template type to use in the absence of any
 # other detected template. If you do not specify the template
@@ -243,7 +243,7 @@ manage_dhcp: 1
 
 # set to 1 to enable Cobbler's DNS management features.
 # the choice of DNS mangement engine is in /etc/cobbler/modules.conf
-manage_dns: 1
+manage_dns: 0
 
 # set to path of bind chroot to create bind-chroot compatible bind
 # configuration files.  This should be automatically detected.
@@ -275,7 +275,7 @@ manage_reverse_zones: ['172.17']
 # if using cobbler with manage_dhcp, put the IP address
 # of the cobbler server here so that PXE booting guests can find it
 # if you do not set this correctly, this will be manifested in TFTP open timeouts.
-next_server: 172.17.0.1
+next_server: ip
 
 # settings for power management features.  optional.
 # see https://github.com/cobbler/cobbler/wiki/Power-management to learn more
@@ -387,7 +387,7 @@ scm_track_mode: "git"
 # if you have a server that appears differently to different subnets
 # (dual homed, etc), you need to read the --server-override section
 # of the manpage for how that works.
-server: 172.17.0.1
+server: ip
 
 # If set to 1, all commands will be forced to use the localhost address
 # instead of using the above value which can force commands like

+ 0 - 20
appliance/roles/provision/files/dnsmasq.template

@@ -1,20 +0,0 @@
-# Cobbler generated configuration file for dnsmasq
-# $date
-#
-
-# resolve.conf .. ?
-#no-poll
-#enable-dbus
-read-ethers
-addn-hosts = /var/lib/cobbler/cobbler_hosts
-
-dhcp-range=172.17.0.10 172.17.0.254
-dhcp-option=66,$next_server
-dhcp-lease-max=1000
-dhcp-authoritative
-dhcp-boot=pxelinux.0
-dhcp-boot=net:normalarch,pxelinux.0
-dhcp-boot=net:ia64,$elilo
-
-$insert_cobbler_system_definitions
-

+ 0 - 19
appliance/roles/provision/files/first-sync.sh

@@ -1,19 +0,0 @@
-
- timeout=30
-while ! netstat -laputen | grep -i listen | grep 25151 1>/dev/null 2>&1
-do
-  sleep 1
-  timeout=$((${timeout} - 1))
-  if [ ${timeout} -eq 0 ]
-  then
-    echo "ERROR: cobblerd is not running."
-    exit 1
-  fi
-done
-sleep 2
-echo "cobbler get-loaders"
-cobbler get-loaders
-echo "cobbler sync"
-cobbler sync
-echo "cobbler check"
-cobbler check

+ 0 - 18
appliance/roles/provision/files/ifcfg-eno1

@@ -1,18 +0,0 @@
-TYPE=Ethernet
-PROXY_METHOD=none
-BROWSER_ONLY=no
-BOOTPROTO=none
-DEFROUTE=yes
-IPV4_FAILURE_FATAL=no
-IPV6INIT=yes
-IPV6_AUTOCONF=yes
-IPV6_DEFROUTE=yes
-IPV6_FAILURE_FATAL=no
-IPV6_ADDR_GEN_MODE=stable-privacy
-NAME=eno1
-UUID=468847a9-d146-4062-813b-85f74ffd6e2a
-DEVICE=eno1
-ONBOOT=yes
-IPV6_PRIVACY=no
-IPADDR=172.17.0.1
-NETMASK=255.255.0.0

+ 43 - 0
appliance/roles/provision/files/inventory_creation.yml

@@ -0,0 +1,43 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+    - name: Read dhcp file
+      set_fact:
+        var: "{{ lookup('file', '/var/lib/dhcpd/dhcpd.leases').split()| unique | select| list }}"
+
+    - name: Filter the ip
+      set_fact:
+        vars_new: "{{ var| ipv4('address')| to_nice_yaml}}"
+
+    - name: Create the static ip
+      shell: awk -F',' 'NR >1{print $3}' omnia/appliance/roles/provision/files/new_mapping_file.csv > static_hosts.yml
+      changed_when: false
+      ignore_errors: true
+
+    - name: Create the dynamic inventory
+      shell: |
+        echo "[all]" >  omnia/appliance/roles/inventory/files/provisioned_hosts.yml
+        echo "{{ vars_new }}" > temp.txt
+        egrep -o '[1-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' temp.txt >>dynamic_hosts.yml
+      changed_when: false
+      ignore_errors: true
+
+    - name: Final inventory
+      shell: cat dynamic_hosts.yml static_hosts.yml| sort -ur  >> omnia/appliance/roles/inventory/files/provisioned_hosts.yml
+      changed_when: false     
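
Note: the play above builds the provisioned-hosts list by pulling every IPv4-looking token out of /var/lib/dhcpd/dhcpd.leases, appending the static IPs from the mapping CSV, and de-duplicating the result into provisioned_hosts.yml. A compact Python illustration of the same extraction and merge, with made-up lease and mapping content (illustration only):

```python
# Illustration only: collecting dynamic lease IPs plus static mapping-file IPs.
import re

IPV4 = re.compile(r"\b(?:\d{1,3}\.){3}\d{1,3}\b")

def lease_ips(leases_text: str) -> set:
    """Every IPv4-looking token in a dhcpd.leases-style file."""
    return set(IPV4.findall(leases_text))

def static_ips(mapping_csv_text: str) -> set:
    """Third column of a MAC,hostname,IP mapping file (header on the first line)."""
    return {line.split(",")[2].strip()
            for line in mapping_csv_text.splitlines()[1:] if line.count(",") >= 2}

leases = "lease 172.17.0.20 { binding state active; }\nlease 172.17.0.21 { }"
mapping = "MAC,Hostname,IP\naa:bb:cc:dd:ee:ff,node001,172.17.0.100"
print(sorted(lease_ips(leases) | static_ips(mapping)))
```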

+ 64 - 12
appliance/roles/provision/files/kickstart.yml

@@ -17,53 +17,105 @@
   connection: local
   gather_facts: false
   vars:
-    name_iso: CentOS8
-    distro_name: CentOS8-x86_64
-    kernel_path: /var/www/cobbler/ks_mirror/CentOS8-x86_64/isolinux/vmlinuz
-
+    name_iso: CentOS7
+    distro_name: CentOS7-x86_64
   tasks:
   - name: Inside cobbler container
     debug:
       msg: "Hiii! I am cobbler"
 
-  - name: Start services
+  - name: Start xinetd
     service:
       name: "{{ item }}"
       state: started
     loop:
       - cobblerd
-      - httpd
-      - rsyncd
       - xinetd
+      - rsyncd
       - tftp
+      - httpd
 
   - name: Cobbler get-loaders
     command: cobbler get-loaders
     changed_when: false
 
+  - name: Get fence agents
+    package:
+      name: fence-agents
+      state: present
+
+  - name: Replace in /etc/debian
+    replace:
+      path: "/etc/debmirror.conf"
+      regexp: "^@dists=\"sid\";"
+      replace: "#@dists=\"sid\";"
+
+  - name: Replace in /etc/debian
+    replace:
+      path: "/etc/debmirror.conf"
+      regexp: "^@arches=\"i386\";"
+      replace: "#@arches=\"i386\";"
+
+  - name: Adding curl
+    shell: export PATH="/usr/bin/curl:$PATH"
+
   - name: Run import command
     command: cobbler import --arch=x86_64 --path=/mnt --name="{{ name_iso }}"
     changed_when: false
 
   - name: Distro list
-    command: >-
-      cobbler distro edit --name="{{ distro_name }}" --kernel="{{ kernel_path }}" --initrd=/var/www/cobbler/ks_mirror/CentOS8-x86_64/isolinux/initrd.img
+    command: cobbler distro edit --name="{{ distro_name }}" --kernel=/var/www/cobbler/ks_mirror/CentOS7-x86_64/isolinux/vmlinuz --initrd=/var/www/cobbler/ks_mirror/CentOS7-x86_64/isolinux/initrd.img
     changed_when: false
 
   - name: Kickstart profile
-    command: cobbler profile edit --name="{{ distro_name }}" --kickstart=/var/lib/cobbler/kickstarts/centos8.ks
+    command: cobbler profile edit --name="{{ distro_name }}" --kickstart=/var/lib/cobbler/kickstarts/centos7.ks
     changed_when: false
 
   - name: Syncing of cobbler
     command: cobbler sync
     changed_when: false
+  
+  - name: Disable default apache webpage
+    blockinfile:
+      state: present
+      insertafter: '^#insert the content here for disabling the default apache webpage'
+      dest: /etc/httpd/conf/httpd.conf
+      block: |
+        <Directory />
+           Order Deny,Allow
+           Deny from all
+           Options None
+           AllowOverride None
+         </Directory>
 
-  - name: Start xinetd
+  - name: Restart cobbler
+    service:
+      name: cobblerd
+      state: restarted
+ 
+  - name: Restart httpdd
+    service:
+      name: httpd
+      state: restarted
+
+  - name: Restart xinetd
     service:
       name: xinetd
       state: restarted
 
-  - name: Start dhcp
+  - name: Restart dhcpd
     service:
       name: dhcpd
       state: restarted
+
+  - name: Add tftp cron job
+    cron:
+      name: Start tftp service
+      minute: "*"
+      job: "ansible-playbook /root/tftp.yml"
+
+  - name: Add inventory cron job
+    cron:
+      name: Create inventory
+      minute: "*/5"
+      job: "ansible-playbook /root/inventory_creation.yml"

+ 27 - 0
appliance/roles/provision/files/start_cobbler.yml

@@ -0,0 +1,27 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Start cobbler on reboot
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+    - name: Wait for 2 minutes
+      pause:
+        minutes: 2
+
+    - name: Execute cobbler sync in cobbler container
+      command: docker exec cobbler cobbler sync
+      changed_when: true

+ 64 - 0
appliance/roles/provision/files/temp_centos7.ks

@@ -0,0 +1,64 @@
+#version=DEVEL
+
+# Use network installation
+url --url http://ip/cblr/links/CentOS7-x86_64/
+
+# Install OS instead of upgrade
+install
+
+# Use text install
+text
+
+# SELinux configuration
+selinux --disabled
+
+# Firewall configuration
+firewall --disabled
+
+# Do not configure the X Window System
+skipx
+
+# Run the Setup Agent on first boot
+#firstboot --enable
+ignoredisk --only-use=sda
+
+# Keyboard layouts
+keyboard us
+
+# System language
+lang en_US
+
+# Network information
+network  --bootproto=dhcp --device=nic --onboot=on
+
+# Root password
+rootpw --iscrypted password
+
+# System services
+services --enabled="chronyd"
+
+# System timezone
+timezone Asia/Kolkata --isUtc
+
+# System bootloader configuration
+bootloader --location=mbr --boot-drive=sda
+
+# Partition clearing information
+clearpart --all --initlabel --drives=sda
+
+# Clear the Master Boot Record
+zerombr
+
+# Disk Partitioning
+partition /boot/efi --asprimary --fstype=vfat --label EFI  --size=200
+partition /boot     --asprimary --fstype=ext4 --label BOOT --size=500
+partition /         --asprimary --fstype=ext4 --label ROOT --size=4096 --grow
+
+# Reboot after installation
+reboot
+
+%packages
+@core
+net-tools
+%end
+

+ 0 - 51
appliance/roles/provision/files/temp_centos8.ks

@@ -1,51 +0,0 @@
-#platform=x86, AMD64, or Intel EM64T
-#version=DEVEL
-# Firewall configuration
-firewall --disabled
-# Install OS instead of upgrade
-install
-# Use network installation
-url --url http://ip/cblr/links/CentOS8-x86_64/
-#repo --name="CentOS" --baseurl=cdrom:sr0 --cost=100
-#Root password
-rootpw --iscrypted password
-# Use graphical install
-#graphical
-#Use text mode install
-text
-#System language
-lang en_US
-#System keyboard
-keyboard us
-#System timezone
-timezone America/Phoenix --isUtc
-# Run the Setup Agent on first boot
-#firstboot --enable
-# SELinux configuration
-selinux --disabled
-# Do not configure the X Window System
-skipx
-# Installation logging level
-#logging --level=info
-# Reboot after installation
-reboot
-# System services
-services --disabled="chronyd"
-ignoredisk --only-use=sda
-# Network information
-network  --bootproto=dhcp --device=em1 --onboot=on
-# System bootloader configuration
-bootloader --location=mbr --boot-drive=sda
-# Clear the Master Boot Record
-zerombr
-# Partition clearing information
-clearpart --all --initlabel
-# Disk partitioning information
-part /boot --fstype="xfs" --size=300
-part swap --fstype="swap" --size=2048
-part pv.01 --size=1 --grow
-volgroup root_vg01 pv.01
-logvol / --fstype xfs --name=lv_01 --vgname=root_vg01 --size=1 --grow
-%packages
-@core
-%end

+ 9 - 8
appliance/roles/provision/files/dhcp.template

@@ -18,14 +18,15 @@ set vendorclass = option vendor-class-identifier;
 
 option pxe-system-type code 93 = unsigned integer 16;
 
-subnet 172.17.0.0 netmask 255.255.0.0 {
-     option routers             172.17.0.1;
-     option domain-name-servers 172.17.0.1;
-     option subnet-mask         255.255.0.0;
-     range dynamic-bootp        172.17.0.10 172.17.0.254;
-     default-lease-time         21600;
-     max-lease-time             43200;
-     next-server                $next_server;
+subnet subnet_mask netmask net_mask {
+option subnet-mask net_mask;
+range dynamic-bootp start end;
+default-lease-time  21600;
+max-lease-time  43200;
+next-server $next_server;
+#insert the static DHCP leases for configuration here
+
+
      class "pxeclients" {
           match if substring (option vendor-class-identifier, 0, 9) = "PXEClient";
           if option pxe-system-type = 00:02 {

+ 46 - 0
appliance/roles/provision/files/tftp.yml

@@ -0,0 +1,46 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Start tftp and dhcp
+  hosts: localhost
+  connection: local
+  tasks:
+    - name: Fetch tftp status
+      command: systemctl is-active tftp
+      args:
+        warn: no
+      register: tftp_status
+      ignore_errors: yes
+      changed_when: false
+
+    - name: Start tftp if inactive state
+      command: systemctl start tftp.service
+      args:
+        warn: no
+      when: "('inactive' in tftp_status.stdout) or ('unknown' in tftp_status.stdout)"
+
+    - name: Fetch dhcp status
+      command: systemctl is-active dhcpd
+      args:
+        warn: no
+      register: dhcp_status
+      ignore_errors: yes
+      changed_when: false
+
+    - name: Start dhcp if inactive state
+      command: systemctl start dhcpd.service
+      args:
+        warn: no
+      when: "('inactive' in dhcp_status.stdout) or ('unknown' in dhcp_status.stdout)"
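
Note: both blocks above follow the same "check `systemctl is-active`, start the unit if it is not active" pattern for tftp and dhcpd. A minimal Python sketch of that pattern, assuming systemd is available as it is inside the Cobbler container (illustration only):

```python
# Illustration only: the "start the unit if it is not active" pattern tftp.yml applies.
import subprocess

def ensure_active(unit: str) -> None:
    state = subprocess.run(["systemctl", "is-active", unit],
                           capture_output=True, text=True).stdout.strip()
    if state in ("inactive", "unknown", "failed"):
        subprocess.run(["systemctl", "start", unit], check=True)

for unit in ("tftp", "dhcpd"):
    ensure_active(unit)
```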

+ 40 - 3
appliance/roles/provision/tasks/check_prerequisites.yml

@@ -13,10 +13,24 @@
 # limitations under the License.
 ---
 
+- name: Check availability of iso file
+  stat:
+    path: "{{ role_path }}/files/{{ iso_name }}"
+  register: iso_status
+  tags: install
+
+- name: Iso file not present
+  fail:
+    msg: "{{ iso_fail }}"
+  when: iso_status.stat.exists == false
+  register: iso_file_check
+  tags: install
+
 - name: Initialize variables
   set_fact:
-    cobbler_status: false
+    cobbler_container_status: false
     cobbler_image_status: false
+    cobbler_config_status: false
   tags: install
 
 - name: Inspect the cobbler image
@@ -37,8 +51,31 @@
   when: cobbler_image_result.images| length==1
   tags: install
 
-- name: Update cobbler status
+- name: Update cobbler container status
   set_fact:
-    cobbler_status: true
+    cobbler_container_status: true
   when: cobbler_result.exists
   tags: install
+
+- name: Fetch cobbler profile list
+  command: docker exec cobbler cobbler profile list
+  changed_when: false
+  register: cobbler_profile_list
+  ignore_errors: true
+  when: cobbler_container_status == true
+
+- name: Check crontab list
+  command: docker exec cobbler crontab -l
+  changed_when: false
+  register: crontab_list
+  ignore_errors: true
+  when: cobbler_container_status == true
+
+- name: Update cobbler container status
+  set_fact:
+    cobbler_config_status: true
+  when:
+    - cobbler_container_status == true
+    - "'CentOS' in cobbler_profile_list.stdout"
+    - "'* * * * * ansible-playbook /root/tftp.yml' in crontab_list.stdout"
+    - "'5 * * * * ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"

+ 35 - 4
appliance/roles/provision/tasks/configure_cobbler.yml

@@ -12,14 +12,45 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
+- name: Delete the cobbler container if exits
+  docker_container:
+    name: cobbler
+    state: absent
+  tags: install
+  when: cobbler_container_status == true and cobbler_config_status == false
 
-- name: Stop the firewall
-  service:
-    name: firewalld
-    state: stopped
+- name: Run cobbler container
+  command: "{{ cobbler_run_command }}"
+  changed_when: false
   tags: install
+  when: cobbler_container_status == true and cobbler_config_status == false
 
 - name: Configuring cobbler inside container (It may take 5-10 mins)
   command: docker exec cobbler ansible-playbook /root/kickstart.yml
   changed_when: false
   tags: install
+  when: cobbler_config_status == false
+
+- name: Schedule task
+  cron:
+    name: "start cobbler on reboot"
+    special_time: reboot
+    job: "ansible-playbook {{ role_path }}/files/start_cobbler.yml"
+  tags: install
+  when: cobbler_config_status == false
+
+- name: Execute cobbler sync in cobbler container
+  command: docker exec cobbler cobbler sync
+  changed_when: true
+  when: cobbler_config_status == true
+
+- name: Remove the files
+  file:
+    path: "{{ item }}"
+    state: absent
+  with_items:
+    - "{{ role_path }}/files/.users.digest"
+    - "{{ role_path }}/files/dhcp.template"
+    - "{{ role_path }}/files/settings"
+    - "{{ role_path }}/files/centos7.ks"
+    - "{{ role_path }}/files/new_mapping_file.csv.bak"

+ 60 - 0
appliance/roles/provision/tasks/dhcp_configure.yml

@@ -0,0 +1,60 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Create the dhcp template
+  copy:
+    src: "{{ role_path }}/files/temp_dhcp.template"
+    dest: "{{ role_path }}/files/dhcp.template"
+    mode: 0775
+  tags: install
+
+- name: Assign subnet and netmask
+  replace:
+    path: "{{ role_path }}/files/dhcp.template"
+    regexp: '^subnet subnet_mask netmask net_mask {'
+    replace: 'subnet {{ subnet }} netmask {{ netmask }} {'
+  tags: install
+
+- name: Assign netmask
+  replace:
+    path: "{{ role_path }}/files/dhcp.template"
+    regexp: '^option subnet-mask net_mask;'
+    replace: 'option subnet-mask {{ netmask }};'
+
+- name: Assign DHCP range
+  replace:
+    path: "{{ role_path }}/files/dhcp.template"
+    regexp: '^range dynamic-bootp start end;'
+    replace: 'range dynamic-bootp {{ dhcp_start_ip }} {{ dhcp_end_ip }};'
+
+- name: Create the cobbler settings file
+  copy:
+    src: "{{ role_path }}/files/cobbler_settings"
+    dest: "{{ role_path }}/files/settings"
+    mode: 0775
+  tags: install
+
+- name: Assign server ip
+  replace:
+    path: "{{ role_path }}/files/settings"
+    regexp: '^server: ip'
+    replace: 'server: {{ hpc_ip }}'
+
+- name: Assign next server ip
+  replace:
+    path: "{{ role_path }}/files/settings"
+    regexp: '^next_server: ip'
+    replace: 'next_server: {{ hpc_ip }}'
+
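
Note: these tasks turn the placeholder tokens in dhcp.template and the Cobbler settings file (subnet_mask, net_mask, start/end, server: ip, next_server: ip) into the values computed in password_config.yml. The same line-anchored substitutions expressed in Python, for illustration only:

```python
# Illustration only: the placeholder substitutions dhcp_configure.yml performs.
import re

def render_dhcp_template(text, subnet, netmask, start, end):
    text = re.sub(r"^subnet subnet_mask netmask net_mask {",
                  f"subnet {subnet} netmask {netmask} {{", text, flags=re.M)
    text = re.sub(r"^option subnet-mask net_mask;",
                  f"option subnet-mask {netmask};", text, flags=re.M)
    text = re.sub(r"^range dynamic-bootp start end;",
                  f"range dynamic-bootp {start} {end};", text, flags=re.M)
    return text

template = ("subnet subnet_mask netmask net_mask {\n"
            "option subnet-mask net_mask;\n"
            "range dynamic-bootp start end;")
print(render_dhcp_template(template, "172.17.0.0", "255.255.0.0",
                           "172.17.0.10", "172.17.0.254"))
```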

+ 1 - 1
appliance/roles/provision/tasks/firewall_settings.yml

@@ -45,7 +45,7 @@
 
 - name:  Permit traffic in default zone on port 69/udp
   firewalld:
-    port: 69/tcp
+    port: 69/udp
     permanent: yes
     state: enabled
   tags: install

+ 15 - 10
appliance/roles/provision/tasks/main.yml

@@ -14,8 +14,6 @@
 ---
 
 #Tasks for Deploying cobbler on the system
-- name: Configure nic
-  import_tasks: configure_nic.yml
 
 - name: Check cobbler status on machine
   include_tasks: check_prerequisites.yml
@@ -26,36 +24,43 @@
 
 - name: Modify firewall settings for Cobbler
   import_tasks: firewall_settings.yml
-  when: not cobbler_status
+  when: not cobbler_container_status
 
 - name: Include common variables
   include_vars: ../../common/vars/main.yml
-  when: not cobbler_status
+  when: not cobbler_container_status
 
 - name: Internet validation
   include_tasks: ../../common/tasks/internet_validation.yml
-  when: not cobbler_status
+  when: not cobbler_container_status
 
 - name: Provision password validation
   import_tasks: provision_password.yml
   when: not cobbler_image_status
 
+- name: DHCP configuration
+  import_tasks: dhcp_configure.yml
+  when: not cobbler_image_status
+
+- name: Mapping file validation
+  import_tasks: mapping_file.yml
+  when: (not cobbler_image_status) and (mapping_file == true)
+
 - name: Cobbler image creation
   import_tasks: cobbler_image.yml
-  when: not cobbler_status
+  when: not cobbler_container_status
 
 - name: Cobbler configuration
   import_tasks: configure_cobbler.yml
-  when: not cobbler_status
 
 - name: Cobbler container status message
   block:
     - debug:
         msg: "{{ message_skipped }}"
         verbosity: 2
-      when: cobbler_status
+      when: cobbler_container_status
     - debug:
         msg: "{{ message_installed }}"
         verbosity: 2
-      when: not cobbler_status
-  tags: install
+      when: not cobbler_container_status
+  tags: install

+ 84 - 0
appliance/roles/provision/tasks/mapping_file.yml

@@ -0,0 +1,84 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Check availability of mapping file
+  stat:
+    path: "{{ role_path }}/files/{{ mapping_file_name }}"
+  register: mapping_file_status
+  tags: install
+
+- name: Mapping file not present
+  fail:
+    msg: "{{ mapping_file_fail }}"
+  when: mapping_file_status.stat.exists == false
+  register: mapping_file_check
+  tags: install
+
+- name: Remove blank lines
+  shell:  awk -F, 'length>NF+1' {{ role_path }}/files/{{ mapping_file_name }} > {{ role_path }}/files/new_mapping_file.csv
+  changed_when: false
+  tags: install
+
+- name: Remove blank spaces
+  shell:  sed -i.bak -E 's/(^|,)[[:blank:]]+/\1/g; s/[[:blank:]]+(,|$)/\1/g'  {{ role_path }}/files/new_mapping_file.csv
+  args:
+    warn: no
+  changed_when: false
+  tags: install
+
+- name: Count the rows
+  shell: awk -F',' '{print $2}' {{ role_path }}/files/new_mapping_file.csv | wc -l
+  register: total_count
+  changed_when: false
+  tags: install
+
+- name: Check for duplicate hostname
+  shell: awk -F',' '{print $2}' {{ role_path }}/files/new_mapping_file.csv | uniq | wc -l
+  register: count_host
+  changed_when: false
+  tags: install
+
+- name: Fail if duplicate hosts exist
+  fail:
+    msg: "{{ fail_hostname_duplicate }}"
+  when: total_count.stdout | int > count_host.stdout | int
+  tags: install
+
+- name: Check if _ or . or space present in hostname
+  shell: awk -F',' '{print $2}' {{ role_path }}/files/new_mapping_file.csv |grep -E -- '_|\.| '
+  register: hostname_result
+  ignore_errors: true
+  changed_when: false
+  tags: install
+
+- name: Fail if _ or . or space present in hostname
+  fail:
+    msg: "{{ hostname_result.stdout + ' :Hostname should not contain _, . or spaces, as they will cause errors with slurm and K8s'}}"
+  when: hostname_result.stdout != ""
+  tags: install
+
+- name: Fetch input
+  blockinfile:
+    path: "{{ role_path }}/files/dhcp.template"
+    insertafter: '^#insert the static DHCP leases for configuration here'
+    block: |
+      host {{ item.split(',')[1] }} {
+        hardware ethernet {{ item.split(',')[0] }};
+        fixed-address {{ item.split(',')[2] }};
+      }
+    marker: "# {mark} DHCP BLOCK OF {{ item.split(',')[0] }}"
+  with_lines: "{{ remove_header }}"
+  ignore_errors: true
+  tags: install
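As an illustration (the MAC, hostname and IP below are made up), a mapping_file.csv row of the form

    e4:43:4b:aa:bb:cc,node001,172.17.0.25

would make the Fetch input task append a block like the following to dhcp.template:

    # BEGIN DHCP BLOCK OF e4:43:4b:aa:bb:cc
    host node001 {
      hardware ethernet e4:43:4b:aa:bb:cc;
      fixed-address 172.17.0.25;
    }
    # END DHCP BLOCK OF e4:43:4b:aa:bb:cc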

+ 3 - 3
appliance/roles/provision/tasks/mount_iso.yml

@@ -32,13 +32,13 @@
 
 - name: Update mount status
   set_fact:
-    mount_check: result.failed
+    mount_check: "{{ result.failed }}"
   tags: install
 
 - name: Mount the iso file
-  command: mount -o loop {{ role_path }}/files/{{ iso_image }} /mnt/{{ iso_path }}
+  command: mount -o loop {{ role_path }}/files/{{ iso_name }} /mnt/{{ iso_path }}
   changed_when: false
   args:
     warn: no
-  when:  mount_check
+  when: mount_check == true
   tags: install

+ 25 - 82
appliance/roles/provision/tasks/provision_password.yml

@@ -26,97 +26,31 @@
     mode: 0644
   tags: install
 
-- name: Take provision Password
-  block:
-  - name: Provision Password (Min length should be 8)
-    pause:
-      prompt: "{{ prompt_password }}"
-      echo: no
-    register: prompt_admin_password
-    until:
-      - prompt_admin_password.user_input | length >  min_length| int  - 1
-    retries: "{{ no_of_retry }}"
-    delay: "{{ retry_delay }}"
-    when: admin_password is not defined and no_prompt is not defined
-  rescue:
-  - name: Abort if password validation fails
-    fail:
-      msg: "{{ msg_incorrect_format }}"
-  tags: install
-
-- name: Assert admin_password if prompt not given
-  assert:
-    that:
-        - admin_password | length >  min_length| int  - 1
-    success_msg: "{{ success_msg_pwd_format }}"
-    fail_msg: "{{ fail_msg_pwd_format }}"
-  register: msg_pwd_format
-  when: admin_password is defined and no_prompt is defined
-  tags: install
-
-- name: Save admin password
-  set_fact:
-    admin_password: "{{ prompt_admin_password.user_input }}"
-  when: no_prompt is not defined
-  tags: install
-
-- name: Confirm password
-  block:
-  - name: Confirm provision password
-    pause:
-      prompt: "{{ confirm_password }}"
-      echo: no
-    register: prompt_admin_password_confirm
-    until: admin_password == prompt_admin_password_confirm.user_input
-    retries: "{{ no_of_retry }}"
-    delay: "{{ retry_delay }}"
-    when: admin_password_confirm is not defined and no_prompt is not defined
-  rescue:
-  - name: Abort if password confirmation failed
-    fail:
-      msg: "{{ msg_failed_password_confirm }}"
-  tags: install
-
-- name: Assert admin_password_confirm if prompt not given
-  assert:
-    that: admin_password == admin_password_confirm
-    success_msg: "{{ success_msg_pwd_confirm }}"
-    fail_msg: "{{ fail_msg_pwd_confirm }}"
-  register: msg_pwd_confirm
-  when: admin_password_confirm is defined and no_prompt is defined
-  tags: install
-
 - name: Encrypt cobbler password
-  shell: >
-     set -o pipefail && \
-     digest="$( printf "%s:%s:%s" {{ username }} "Cobbler" {{ admin_password }} | md5sum | awk '{print $1}' )"
-     printf "%s:%s:%s\n" "{{ username }}" "Cobbler" "$digest" > "{{ role_path }}/files/.users.digest"
-  args:
-    executable: /bin/bash
+  shell: printf "%s:%s:%s" {{ username }} "Cobbler" "{{ cobbler_password }}" | md5sum | awk '{print $1}'
   changed_when: false
+  register: encrypt_password
+  no_log: true
   tags: install
 
-- name: Read password file
-  set_fact:
-    var: "{{ lookup('file', role_path+'/files/.users.digest').splitlines() }}"
+- name: Copy cobbler password to cobbler config file
+  shell: printf "%s:%s:%s\n" "{{ username }}" "Cobbler" "{{ encrypt_password.stdout }}" > "{{ role_path }}/files/.users.digest"
+  changed_when: false
+  no_log: true
   tags: install
 
-- name: Get encrypted password
-  set_fact:
-    encrypted_pass: "{{ var[0].split(':')[2] }}"
-
 - name: Create the kickstart file
   copy:
-    src: "{{ role_path }}/files/temp_centos8.ks"
-    dest: "{{ role_path }}/files/centos8.ks"
+    src: "{{ role_path }}/files/temp_centos7.ks"
+    dest: "{{ role_path }}/files/centos7.ks"
     mode: 0775
   tags: install
 
-- name: Configure kickstart file
+- name: Configure kickstart file- IP
   replace:
-    path: "{{ role_path }}/files/centos8.ks"
-    regexp: '^url --url http://ip/cblr/links/CentOS8-x86_64/'
-    replace: url --url http://{{ ansible_eno2.ipv4.address }}/cblr/links/CentOS8-x86_64/
+    path: "{{ role_path }}/files/centos7.ks"
+    regexp: '^url --url http://ip/cblr/links/CentOS7-x86_64/'
+    replace: url --url http://{{ hpc_ip }}/cblr/links/CentOS7-x86_64/
   tags: install
 
 - name: Random phrase generation
@@ -131,14 +65,23 @@
   tags: install
 
 - name: Login password
-  command: openssl passwd -1 -salt {{ random_phrase }} {{ admin_password }}
+  command: openssl passwd -1 -salt {{ random_phrase }} {{ cobbler_password }}
+  no_log: true
   changed_when: false
   register: login_pass
   tags: install
 
-- name: Configure kickstart file
+- name: Configure kickstart file- Password
   replace:
-    path: "{{ role_path }}/files/centos8.ks"
+    path: "{{ role_path }}/files/centos7.ks"
     regexp: '^rootpw --iscrypted password'
     replace: 'rootpw --iscrypted {{ login_pass.stdout }}'
+  no_log: true
+  tags: install
+
+- name: Configure kickstart file- nic
+  replace:
+    path: "{{ role_path }}/files/centos7.ks"
+    regexp: '^network  --bootproto=dhcp --device=nic --onboot=on'
+    replace: 'network  --bootproto=dhcp --device={{ nic }} --onboot=on'
   tags: install
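To picture the output of the two cobbler password tasks near the top of this file: with the username variable cobbler, the fixed realm string Cobbler and a placeholder password, the digest pipeline is

    printf "%s:%s:%s" cobbler "Cobbler" '<cobbler password>' | md5sum | awk '{print $1}'

and .users.digest ends up holding a single line of the form cobbler:Cobbler:<32-character hex digest>.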

+ 13 - 21
appliance/roles/provision/vars/main.yml

@@ -15,36 +15,28 @@
 
 # vars file for provision
 
+#Usage: mapping_file.yml
+mapping_file_name: mapping_file.csv
+mapping_file_fail: "Mapping file not found. Copy the mapping_file.csv to omnia/appliance/roles/provision/files"
+fail_hostname_duplicate: "Duplicate hostname exists. Please verify the mapping file again."
+remove_header: awk 'NR > 1 { print }' {{ role_path }}/files/new_mapping_file.csv
+
+#Usage: check_prerequisite.yml
+iso_name: CentOS-7-x86_64-Minimal-2009.iso
+iso_fail: "ISO file not found. Download and copy the ISO file to omnia/appliance/roles/provision/files"
+
 # Usage: provision_password.yml
 provision_encrypted_dest: ../files/
-min_length: 8
-no_of_retry: 3
-retry_delay: 0.001
 username: cobbler
-prompt_password: "Enter cobbler password.( Min. Length of Password should be {{ min_length| int }}." 
-confirm_password: "Confirm cobbler Password"
-msg_incorrect_format: "Failed. Incorrect format."
-msg_failed_password_confirm: "Failed. Passwords did not match"
-success_msg_pwd_format: "admin_password validated"
-fail_msg_pwd_format: "admin_password validation failed"
-success_msg_pwd_confirm: "admin_password confirmed"
-fail_msg_pwd_confirm: "admin_password confirmation failed"
-success_msg_format: "random_phrase validated"
-fail_msg_format: "random_phrase validation failed"
 
 # Usage: cobbler_image.yml
 docker_image_name: cobbler
 docker_image_tag: latest
-cobbler_run_command: docker run -itd --privileged --net=host --restart=always -v cobbler_www:/var/www/cobbler:Z -v cobbler_backup:/var/lib/cobbler/backup:Z -v /mnt/iso:/mnt:Z -p 69:69/udp -p 81:80 -p 443:443 -p 25151:25151 --name cobbler  cobbler:latest  /sbin/init
-
+cobbler_run_command: docker run -itd --privileged --net=host --restart=always -v {{ mount_path }}:/root/omnia  -v cobbler_www:/var/www/cobbler:Z -v cobbler_backup:/var/lib/cobbler/backup:Z -v /mnt/iso:/mnt:Z -p 69:69/udp -p 81:80 -p 443:443 -p 25151:25151 --name cobbler  cobbler:latest  /sbin/init
 
 # Usage: main.yml
-message_skipped: "Installation Skipped: Cobbler instance is already running on your system"
+message_skipped: "Installation Skipped: Cobbler instance is already running in your system"
 message_installed: "Installation Successful"
 
-# Usage: os_provsion.yml
-iso_image: CentOS-8.2.2004-x86_64-minimal.iso 
+# Usage: mount_iso.yml
 iso_path: iso
-
-# Usage: configure_nic.yml
-eno: eno1
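Once the role has started the container with this cobbler_run_command, a quick manual sanity check (not part of the role) is:

    docker ps --filter name=cobbler       # the cobbler container should be Up
    docker exec cobbler cobbler sync      # the same sync the role issues when cobbler is already configured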

+ 164 - 33
appliance/roles/web_ui/files/awx_configuration.yml

@@ -13,78 +13,159 @@
 # limitations under the License.
 ---
 
-# Playbook to configure AWX
-- name: Configure AWX
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  tasks:
-    - name: Include vars file
-      include_vars: ../vars/main.yml
-
-    # Get Current AWX configuration
+# Get Current AWX configuration
+- name: Waiting for 30 seconds for UI components to be accessible
+  wait_for:
+    timeout: 30
+
+- name: Organization list
+  block:
     - name: Get organization list
       command: >-
         awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
         organizations list -f human
       register: organizations_list
+      changed_when: no
+      no_log: True
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ organizations_list.stderr | regex_replace(awx_user) | regex_replace(admin_password) }}"
 
+- name: Project list
+  block:
     - name: Get project list
       command: >-
         awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
         projects list -f human
       register: projects_list
+      changed_when: no
+      no_log: True
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ projects_list.stderr | regex_replace(awx_user) | regex_replace(admin_password) }}"
 
+- name: Inventory list
+  block:
     - name: Get inventory list
       command: >-
         awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
         inventory list -f human
       register: inventory_list
+      changed_when: no
+      no_log: True
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ inventory_list.stderr | regex_replace(awx_user) | regex_replace(admin_password) }}"
+
+- name: Credential list
+  block:
+    - name: Get credentials list
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        credentials list -f human
+      register: credentials_list
+      changed_when: no
+      no_log: True
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ credentials_list.stderr | regex_replace(awx_user) | regex_replace(admin_password) }}"
 
+- name: Template List
+  block:
     - name: Get template list
       command: >-
         awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
         job_templates list -f human
       register: job_templates_list
+      changed_when: no
+      no_log: True
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ job_templates_list.stderr | regex_replace(awx_user) | regex_replace(admin_password) }}"
 
+- name: Group names
+  block:
     - name: If omnia-inventory exists, fetch group names in the inventory
       command: >-
         awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
         groups list --inventory "{{ omnia_inventory_name }}" -f human
       register: groups_list
       when: omnia_inventory_name in inventory_list.stdout
+      no_log: True
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ groups_list.stderr | regex_replace(awx_user) | regex_replace(admin_password) }}"
 
+- name: Schedules list
+  block:
     - name: Get schedules list
       command: >-
         awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
         schedules list -f human
       register: schedules_list
+      changed_when: no
+      no_log: True
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ schedules_list.stderr | regex_replace(awx_user) | regex_replace(admin_password) }}"
 
-    # Delete Default Configurations
+# Delete Default Configurations
+- name: Delete default configurations
+  block:
     - name: Delete default organization
       command: >-
         awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
         organizations delete "{{ default_org }}"
       when: default_org in organizations_list.stdout
+      register: register_error
+      no_log: True
 
     - name: Delete default job template
       command: >-
         awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
         job_templates delete "{{ default_template }}"
       when: default_template in job_templates_list.stdout
+      register: register_error
+      no_log: True
 
     - name: Delete default project
       command: >-
         awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
         projects delete "{{ default_projects }}"
       when: default_projects in projects_list.stdout
+      register: register_error
+      no_log: True
+
+    - name: Delete default credential
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        credentials delete "{{ default_credentials }}"
+      when: default_credentials in credentials_list.stdout
+      register: register_error
+      no_log: True
 
-    # Create required configuration if not present
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ register_error.stderr | regex_replace(awx_user) | regex_replace(admin_password) }}"
+
+# Create required configuration if not present
+- name: Create required configurations
+  block:
     - name: Create organisation
       command: >-
         awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
         organizations create --name "{{ organization_name }}"
       when: organization_name not in organizations_list.stdout
+      register: register_error
+      no_log: True
 
     - name: Create new project
       command: >-
@@ -92,44 +173,86 @@
         projects create --name "{{ project_name }}" --organization "{{ organization_name }}"
         --local_path "{{ dir_name }}"
       when: project_name not in projects_list.stdout
+      register: register_error
+      no_log: True
 
     - name: Create new omnia inventory
       command: >-
         awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
         inventory create --name "{{ omnia_inventory_name }}" --organization "{{ organization_name }}"
       when: omnia_inventory_name not in inventory_list.stdout
+      register: register_error
+      no_log: True
 
     - name: Create groups in omnia inventory
       command: >-
         awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
         groups create --name "{{ item }}" --inventory "{{ omnia_inventory_name }}"
       when: omnia_inventory_name not in inventory_list.stdout or item not in groups_list.stdout
+      register: register_error
+      no_log: True
       loop: "{{ group_names }}"
 
-    - name: Create template to deploy omnia
+    - name: Create credentials for omnia
       command: >-
         awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
-        job_templates create
-        --name "{{ omnia_template_name }}"
-        --job_type run
-        --inventory "{{ omnia_inventory_name }}"
-        --project "{{ project_name }}"
-        --playbook "{{ omnia_playbook }}"
-        --verbosity "{{ playbooks_verbosity }}"
-        --ask_skip_tags_on_launch true
+        credentials create --name "{{ credential_name }}" --organization "{{ organization_name }}"
+        --credential_type "{{ credential_type }}"
+        --inputs '{"username": "{{ cobbler_username }}", "password": "{{ cobbler_password }}"}'
+      when: credential_name not in credentials_list.stdout
+      register: register_error
+      no_log: True
+
+    - name: DeployOmnia Template
+      block:
+        - name: Create template to deploy omnia
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+            job_templates create
+            --name "{{ omnia_template_name }}"
+            --job_type run
+            --inventory "{{ omnia_inventory_name }}"
+            --project "{{ project_name }}"
+            --playbook "{{ omnia_playbook }}"
+            --verbosity "{{ playbooks_verbosity }}"
+            --ask_skip_tags_on_launch true
+          register: register_error
+          no_log: True
+
+        - name: Associate credential
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+            job_templates associate "{{ omnia_template_name }}"
+            --credential ""{{ credential_name }}""
+          register: register_error
+          no_log: True
+
       when: omnia_template_name not in job_templates_list.stdout
 
-    - name: Create template to fetch dynamic inventory
-      command: >-
-        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
-        job_templates create
-        --name "{{ inventory_template_name }}"
-        --job_type run
-        --inventory "{{ omnia_inventory_name }}"
-        --project "{{ project_name }}"
-        --playbook "{{ inventory_playbook }}"
-        --verbosity "{{ playbooks_verbosity }}"
-        --use_fact_cache true
+    - name: DynamicInventory template
+      block:
+        - name: Create template to fetch dynamic inventory
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+            job_templates create
+            --name "{{ inventory_template_name }}"
+            --job_type run
+            --inventory "{{ omnia_inventory_name }}"
+            --project "{{ project_name }}"
+            --playbook "{{ inventory_playbook }}"
+            --verbosity "{{ playbooks_verbosity }}"
+            --use_fact_cache true
+          register: register_error
+          no_log: True
+
+        - name: Associate credential
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+            job_templates associate "{{ inventory_template_name }}"
+            --credential ""{{ credential_name }}""
+          register: register_error
+          no_log: True
+
       when: inventory_template_name not in job_templates_list.stdout
 
     - name: Schedule dynamic inventory template
@@ -138,6 +261,7 @@
           command: >-
             awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
             unified_job_templates list --name "{{ inventory_template_name }}" -f human
+          no_log: True
           register: unified_job_template_list
 
         - name: Get job ID
@@ -149,5 +273,12 @@
             awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
             schedules create --name "{{ schedule_name }}"
             --unified_job_template="{{ job_id }}" --rrule="{{ schedule_rule }}"
+          register: register_error
+          no_log: True
+
+      when: schedule_name not in schedules_list.stdout
 
-      when: schedule_name not in schedules_list.stdout
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ register_error.stderr | regex_replace(awx_user) | regex_replace(admin_password) }}"
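Each block above follows the same check-then-create pattern so the playbook stays idempotent: list what already exists, then create only what is missing. Run by hand with placeholder credentials, the organization step reduces to roughly:

    awx --conf.host http://localhost:8081 --conf.username admin --conf.password '<awx password>' organizations list -f human
    awx --conf.host http://localhost:8081 --conf.username admin --conf.password '<awx password>' organizations create --name DellEMC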

+ 0 - 118
appliance/roles/web_ui/tasks/awx_password.yml

@@ -1,118 +0,0 @@
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-
-#Tasks for getting and encrypting AWX Password
-- name: Clone AWX repo
-  git:
-    repo: "{{ awx_git_repo }}"
-    dest: "{{ awx_repo_path }}"
-    force: yes
-  tags: install
-
-- name: AWX password
-  block:
-    - name: Take awx password
-      pause:
-        prompt: "{{ prompt_password }}"
-        echo: no
-      register: prompt_admin_password
-      until:
-        - prompt_admin_password.user_input | length >  min_length| int  - 1
-        - '"-" not in prompt_admin_password.user_input '
-        - '"\\" not in prompt_admin_password.user_input '
-        - '"\"" not in prompt_admin_password.user_input '
-        - " \"'\" not in prompt_admin_password.user_input "
-      retries: "{{ retries }}"
-      delay: "{{ retry_delay }}"
-      when: admin_password is not defined and no_prompt is not defined
-  rescue:
-    - name: Abort if password validation fails
-      fail:
-        msg: "{{ msg_incorrect_password_format }}"
-  tags: install
-
-- name: Assert admin_password if prompt not given
-  assert:
-    that:
-        - admin_password | length >  min_length| int  - 1
-        - '"-" not in admin_password '
-        - '"\\" not in admin_password '
-        - '"\"" not in admin_password '
-        - " \"'\" not in admin_password "
-    success_msg: "{{ success_msg_pwd_format }}"
-    fail_msg: "{{ fail_msg_pwd_format }}"
-  register: msg_pwd_format
-  when: admin_password is defined and no_prompt is defined
-
-- name: Save admin password
-  set_fact:
-    admin_password: "{{ prompt_admin_password.user_input }}"
-  when: no_prompt is not defined
-
-- name: Confirmation
-  block:
-    - name: Confirm AWX password
-      pause:
-        prompt: "{{ confirm_password }}"
-        echo: no
-      register: prompt_admin_password_confirm
-      until: admin_password == prompt_admin_password_confirm.user_input
-      retries: "{{ confirm_retries }}"
-      delay: "{{ retry_delay }}"
-      when: admin_password_confirm is not defined and no_prompt is not defined
-  rescue:
-    - name: Abort if password confirmation failed
-      fail:
-        msg: "{{ msg_failed_password_confirm }}"
-  tags: install
-
-- name: Assert admin_password_confirm if prompt not given
-  assert:
-    that: admin_password == admin_password_confirm
-    success_msg: "{{ success_msg_pwd_confirm }}"
-    fail_msg: "{{ fail_msg_pwd_confirm }}"
-  register: msg_pwd_confirm
-  when: admin_password_confirm is defined and no_prompt is defined
-
-- name: Create ansible vault key
-  set_fact:
-    vault_key: "{{ lookup('password', '/dev/null chars=ascii_letters') }}"
-  tags: install
-
-- name: Save vault key
-  copy:
-    dest: "{{ awx_installer_path + vault_file }}"
-    content: |
-      {{ vault_key }}
-    owner: root
-    force: yes
-  tags: install
-
-- name: Encrypt awx password
-  command: ansible-vault encrypt_string "{{ admin_password }}" --name admin_password --vault-password-file "{{ vault_file }}"
-  register: encrypt_password
-  args:
-    chdir: "{{ awx_installer_path }}"
-  tags: install
-
-- name: Store encrypted password
-  copy:
-    dest: "{{ awx_installer_path + awx_password_file }}"
-    content: |
-      ---
-      {{ encrypt_password.stdout }}
-    force: yes
-    owner: root
-  tags: install

+ 0 - 8
appliance/roles/web_ui/tasks/check_prerequisites.yml

@@ -17,7 +17,6 @@
 - name: Initialize variables
   set_fact:
     awx_status: false
-    awx_task_status: false
   tags: install
 
 - name: Check awx_task status on the machine
@@ -26,17 +25,10 @@
   register: awx_task_result
   tags: install
 
-- name: Update awx status
-  set_fact:
-    awx_task_status: true
-  when: awx_task_result.exists
-  tags: install
-
 - name: Check awx_web status on the machine
   docker_container_info:
     name: awx_web
   register: awx_web_result
-  when: awx_task_status
   tags: install
 
 - name: Update awx status

+ 22 - 0
appliance/roles/web_ui/tasks/clone_awx.yml

@@ -0,0 +1,22 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Clone AWX repo
+  git:
+    repo: "{{ awx_git_repo }}"
+    dest: "{{ awx_repo_path }}"
+    force: yes
+    version: 15.0.0
+  tags: install

+ 27 - 8
appliance/roles/web_ui/tasks/install_awx.yml

@@ -14,10 +14,6 @@
 ---
 
 # Tasks for installing AWX
-- name: Store omnia parent directory path
-  set_fact:
-     dir_path:
-  tags: install
 
 - name: Change inventory file
   replace:
@@ -32,14 +28,37 @@
     label: "{{ item.name }}"
   tags: install
 
+- name: Ensure port is 8081
+  lineinfile:
+    path: "{{ awx_inventory_path }}"
+    regexp: "{{ port_old }}"
+    line: "{{ port_new }}"
+    state: present
+
 - name: Create pgdocker directory
   file:
     path: "{{ pgdocker_dir_path }}"
     state: directory
+    mode: 0775
   tags: install
 
-- name: Run AWX install.yml file
-  command: ansible-playbook -i inventory install.yml -e @"{{ awx_password_file }}" --vault-password-file "{{ vault_file }}"
-  args:
-    chdir: "{{ awx_installer_path }}"
+- name: Install AWX
+  block:
+    - name: Run AWX install.yml file
+      command: ansible-playbook -i inventory install.yml --extra-vars "admin_password={{ admin_password }}"
+      args:
+        chdir: "{{ awx_installer_path }}"
+      register: awx_installation
+      no_log: True
+
+  rescue:
+    - name: Check AWX status on machine
+      include_tasks: check_awx_status.yml
+
+    - name: Fail if containers are not running
+      fail:
+        msg: "AWX installation failed with error msg:
+        {{ awx_installation.stdout | regex_replace(admin_password) }}."
+      when: not awx_status
+
   tags: install
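After the replace and lineinfile edits above, the relevant lines of the AWX installer inventory end up approximately as follows (values taken from the web_ui vars in this change; the rest of the inventory is left untouched):

    host_port=8081
    #admin_password=password
    awx_alternate_dns_servers="8.8.8.8,8.8.4.4"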

+ 4 - 2
appliance/roles/web_ui/tasks/install_awx_cli.yml

@@ -16,10 +16,12 @@
 # Tasks for installing AWX-CLI
 - name: Add AWX CLI repo
   block:
-    - get_url:
+    - name: Get repo
+      get_url:
         url: "{{ awx_cli_repo }}"
         dest: "{{ awx_cli_repo_path }}"
-    - replace:
+    - name: Disable gpgcheck
+      replace:
         path: "{{ awx_cli_repo_path }}"
         regexp: 'gpgcheck=1'
         replace: 'gpgcheck=0'

+ 14 - 8
appliance/roles/web_ui/tasks/main.yml

@@ -15,7 +15,7 @@
 
 # Tasks for Deploying AWX on the system
 - name: Check AWX status on machine
-  include_tasks: check_prerequisites.yml
+  include_tasks: check_awx_status.yml
   tags: install
 
 - name: Include common variables
@@ -27,8 +27,8 @@
   when: not awx_status
   tags: install
 
-- name: Get and encrypt AWX password
-  include_tasks: awx_password.yml
+- name: Clone AWX repo
+  include_tasks: clone_awx.yml
   when: not awx_status
   tags: install
 
@@ -62,9 +62,15 @@
   include_tasks: install_awx_cli.yml
   tags: install
 
-- name: AWX configuration
-  command: >-
-    ansible-playbook "{{ role_path }}"/files/awx_configuration.yml
-    -e @"{{ awx_installer_path + awx_password_file }}"
-    --vault-password-file "{{ awx_installer_path + vault_file }}"
+- name: Check if AWX-UI is accessible
+  include_tasks: ui_accessibility.yml
   tags: install
+
+- name: Configure AWX
+  block:
+    - include_tasks: awx_configuration.yml
+  rescue:
+    - name: Display msg
+      debug:
+        msg: "{{ conf_fail_msg }}"
+  tags: install

+ 85 - 0
appliance/roles/web_ui/tasks/ui_accessibility.yml

@@ -0,0 +1,85 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+# Check accessibility of AWX-UI
+- name: Re-install if in migrating state
+  block:
+    - name: Wait for AWX UI to be up
+      uri:
+        url: "{{ awx_ip }}"
+        status_code: "{{ return_status }}"
+        return_content: yes
+      register: register_error
+      until: awx_ui_msg in register_error.content
+      retries: 20
+      delay: 15
+      changed_when: no
+      no_log: True
+
+  rescue:
+    - name: Starting rescue
+      debug:
+        msg: "Attempting to re-install AWX"
+
+    - name: Remove old containers
+      docker_container:
+        name: "{{ item }}"
+        state: absent
+      loop:
+        - awx_task
+        - awx_web
+
+    - name: Restart docker
+      service:
+        name: docker
+        state: restarted
+
+    - name: Re-install AWX
+      block:
+        - name: Run AWX install.yml file
+          command: ansible-playbook -i inventory install.yml --extra-vars "admin_password={{ admin_password }}"
+          args:
+            chdir: "{{ awx_installer_path }}"
+          register: awx_installation
+          no_log: True
+
+      rescue:
+        - name: Check AWX status on machine
+          include_tasks: check_awx_status.yml
+
+        - name: Fail if containers are not running
+          fail:
+            msg: "AWX installation failed with error msg:
+             {{ awx_installation.stdout | regex_replace(admin_password) }}."
+          when: not awx_status
+
+    - name: Check if AWX UI is up
+      block:
+        - name: Wait for AWX UI to be up
+          uri:
+            url: "{{ awx_ip }}"
+            status_code: "{{ return_status }}"
+            return_content: yes
+          register: register_error
+          until: awx_ui_msg in register_error.content
+          retries: 30
+          delay: 10
+          changed_when: no
+          no_log: True
+      rescue:
+        - name: Message
+          fail:
+            msg: "{{ register_error | regex_replace(awx_user) | regex_replace(admin_password) }}"
+  tags: install
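The uri retry loop above amounts to polling the UI until the login page renders. A rough manual equivalent, using the awx_ip and awx_ui_msg values from the web_ui vars, is:

    curl -s -o /dev/null -w '%{http_code}\n' http://localhost:8081    # expect 200
    curl -s http://localhost:8081 | grep -c 'Password Dialog'         # non-zero once the UI is up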

+ 14 - 19
appliance/roles/web_ui/vars/main.yml

@@ -15,25 +15,11 @@
 
 # vars file for web_ui
 
-# Usage: awx_password.yml
+# Usage: clone_awx.yml
 awx_git_repo: "https://github.com/ansible/awx.git"
-min_length: 8
-retries: 3
-confirm_retries: 1
-retry_delay: 0.01
-prompt_password: "Enter AWX password.( Min. Length of Password should be {{ min_length| int }}. Dont use chars: - \' \\ \" )"
-confirm_password: "Confirm AWX Password"
-msg_incorrect_password_format: "Failed. Password format not correct."
-msg_failed_password_confirm: "Failed. Passwords did not match"
 docker_volume: "/var/lib/docker/volumes/{{ docker_volume_name }}"
 awx_repo_path: "{{ docker_volume }}/awx/"
 awx_installer_path: "{{ awx_repo_path }}/installer/"
-vault_file: .vault_key
-awx_password_file: .password.yml
-success_msg_pwd_format: "admin_password validated"
-fail_msg_pwd_format: "admin_password validation failed"
-success_msg_pwd_confirm: "admin_password confirmed"
-fail_msg_pwd_confirm: "admin_password confirmation failed"
 
 # Usage: install_awx.yml
 awx_inventory_path: "{{ awx_repo_path }}/installer/inventory"
@@ -44,21 +30,27 @@ awx_alternate_dns_servers_old: '#awx_alternate_dns_servers="10.1.2.3,10.2.3.4"'
 awx_alternate_dns_servers_new: 'awx_alternate_dns_servers="8.8.8.8,8.8.4.4"'
 admin_password_old: "admin_password=password"
 admin_password_new: "#admin_password=password"
+port_old: "host_port=80"
+port_new: "host_port=8081"
 
 # Usage: main.yml
 message_skipped: "Installation Skipped: AWX instance is already running on your system"
 message_installed: "Installation Successful"
+awx_ip: http://localhost:8081
+return_status: 200
+awx_ui_msg: "Password Dialog"
+conf_fail_msg: "AWX configuration failed at the last executed task."
 
 # Usage: install_awx_cli.yml
-awx_cli_repo: "https://releases.ansible.com/ansible-tower/cli/ansible-tower-cli-centos8.repo"
-awx_cli_repo_path: "/etc/yum.repos.d/ansible-tower-cli-centos8.repo"
+awx_cli_repo: "https://releases.ansible.com/ansible-tower/cli/ansible-tower-cli-centos7.repo"
+awx_cli_repo_path: "/etc/yum.repos.d/ansible-tower-cli-centos7.repo"
 
 # Usage: awx_configuration.yml
-awx_ip: http://localhost
 awx_user: admin         #Don't change it. It is set as admin while installing AWX
 default_org: Default
 default_template: 'Demo Job Template'
 default_projects: 'Demo Project'
+default_credentials: 'Demo Credential'
 dir_name: omnia
 organization_name: DellEMC
 project_name: omnia
@@ -66,10 +58,13 @@ omnia_inventory_name: omnia_inventory
 group_names:
   - manager
   - compute
+credential_name: omnia_credential
+credential_type: Machine
+cobbler_username: root
 omnia_template_name: DeployOmnia
 omnia_playbook: omnia.yml
 inventory_template_name: DynamicInventory
 inventory_playbook: appliance/inventory.yml
 playbooks_verbosity: 0
 schedule_name: DynamicInventorySchedule
-schedule_rule: "DTSTART:20201201T000000Z RRULE:FREQ=MINUTELY;INTERVAL=10"
+schedule_rule: "DTSTART:20201201T000000Z RRULE:FREQ=MINUTELY;INTERVAL=10"

+ 42 - 0
appliance/test/appliance_config_empty.yml

@@ -0,0 +1,42 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Password used while deploying OS on bare metal servers and for Cobbler UI.
+# The Length of the password should be at least 8.
+# The password must not contain -,\, ',"
+provision_password: ""
+
+# Password used for the AWX UI.
+# The Length of the password should be at least 8.
+# The password must not contain -,\, ',"
+awx_password: ""
+
+# The nic/ethernet card that needs to be connected to the HPC switch.
+# This nic will be configured by Omnia for the DHCP server.
+# Default value of nic is em1.
+hpc_nic: "em1"
+
+# The nic/ethernet card that will be connected to the public internet.
+# Default value of nic is em2
+public_nic: "em2"
+
+# The mapping file consists of the MAC address and its respective IP address and hostname.
+# If user wants to provide a mapping file, set this value to "true"
+# The format of mapping file should be MAC,hostname,IP and must be a CSV file.
+mapping_file_exists: ""
+
+# The dhcp range for assigning the IP address to the baremetal nodes.
+dhcp_start_ip_range: ""
+dhcp_end_ip_range: ""

+ 42 - 0
appliance/test/appliance_config_test.yml

@@ -0,0 +1,42 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Password used while deploying OS on bare metal servers and for Cobbler UI.
+# The Length of the password should be at least 8.
+# The password must not contain -,\, ',"
+provision_password: "omnia@123"
+
+# Password used for the AWX UI.
+# The Length of the password should be at least 8.
+# The password must not contain -,\, ',"
+awx_password: "omnia@123"
+
+# The nic/ethernet card that needs to be connected to the HPC switch.
+# This nic will be configured by Omnia for the DHCP server.
+# Default value of nic is em1.
+hpc_nic: "em1"
+
+# The nic/ethernet card that will be connected to the public internet.
+# Default value of nic is em2
+public_nic: "em2"
+
+# The mapping file consists of the MAC address and its respective IP address and hostname.
+# If user wants to provide a mapping file, set this value to "true"
+# The format of mapping file should be MAC,hostname,IP and must be a CSV file.
+mapping_file_exists: "false"
+
+# The dhcp range for assigning the IP address to the baremetal nodes.
+dhcp_start_ip_range: "172.17.0.20"
+dhcp_end_ip_range: "172.17.0.100"

+ 0 - 3
appliance/test/cobbler_inventory

@@ -1,3 +0,0 @@
-[cobbler_servers]
-172.17.0.10
-100.98.24.231

+ 3 - 0
appliance/test/provisioned_hosts.yml

@@ -0,0 +1,3 @@
+[all]
+172.17.0.10
+172.17.0.15

File diff too large to display
+ 1661 - 15
appliance/test/test_common.yml


+ 891 - 0
appliance/test/test_omnia.yml

@@ -0,0 +1,891 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+# Testcase OMNIA_CRM_US_AWXD_TC_006
+# Test case to validate whether the proper error message is displayed when slurm and kubernetes tags are skipped
+- name: OMNIA_CRM_US_AWXD_TC_006
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/web_ui/vars/main.yml
+    - ../roles/common/vars/main.yml
+    - test_vars/test_omnia_vars.yml
+  tasks:
+    - name: Check input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_006
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_filename }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_006
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_006
+
+    - name: Creating inventory file with hosts associated with the groups
+      copy:
+        dest: "testinventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+          ---
+          manager:
+            hosts:
+              {{ host1 }}
+
+          compute:
+            hosts:
+              {{ host2 }}
+      tags: TC_006
+
+    - name: Push the inventory to AWX
+      shell: |
+        set -o pipefail
+        docker exec awx_task awx-manage inventory_import --inventory-name {{ omnia_inventory_name }} --source "{{ inventory_path }}/testinventory.yml"
+      changed_when: false
+      tags: TC_006
+
+    - block:
+        - name: Launch the job template
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+            job_templates launch "{{ omnia_template_name }}" --credentials "{{ credential_name }}" --skip_tags slurm,kubernetes --monitor -f human
+          changed_when: false
+          register: command_output
+
+      rescue:
+        - name: Validate error message
+          assert:
+            that: "'FAILED!' in command_output.stdout"
+            success_msg: "{{ test_case_success_msg }}"
+            fail_msg: "{{ test_case_failure_msg }}"
+      tags: TC_006
+
+    - name: Delete the hosts
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+        hosts delete {{ item }} --monitor -f human
+      loop:
+        - "{{ host1 }}"
+        - "{{ host2 }}"
+      changed_when: false
+      tags: TC_006
+
+    - name: Delete the inventory file
+      ignore_errors: yes
+      file:
+        state: absent
+        path: testinventory.yml
+      tags: TC_006
+
+    - name: Create inventory file if it doesn't exist
+      ignore_errors: yes
+      file:
+        path: "testinventory.yml"
+        state: touch
+        mode: '{{ file_permission }}'
+      tags: TC_006
+
+# Testcase OMNIA_CRM_US_AWXD_TC_007
+# Test case to validate whether the skip tags validation is passed when slurm tag is given
+- name: OMNIA_CRM_US_AWXD_TC_007
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/web_ui/vars/main.yml
+    - ../roles/common/vars/main.yml
+    - test_vars/test_omnia_vars.yml
+  tasks:
+    - name: Check input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_007
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_filename }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_007
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_007
+
+    - name: Creating inventory file with hosts associated with the groups
+      copy:
+        dest: "testinventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+          ---
+          manager:
+            hosts:
+              {{ host1 }}
+
+          compute:
+            hosts:
+              {{ host2 }}
+      tags: TC_007
+
+    - name: Push the inventory to AWX
+      shell: |
+        set -o pipefail
+        docker exec awx_task awx-manage inventory_import --inventory-name {{ omnia_inventory_name }} --source "{{ inventory_path }}/testinventory.yml"
+      changed_when: false
+      tags: TC_007
+
+    - block:
+        - name: Launch the job template
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+            job_templates launch "{{ omnia_template_name }}" --credentials "{{ credential_name }}" --skip_tags slurm --monitor -f human
+          changed_when: false
+          register: command_output
+      tags: TC_007
+
+    - name: Validate success message
+      assert:
+        that: "'FAILED!' not in command_output.stdout"
+        success_msg: "{{ test_case_success_msg }}"
+        fail_msg: "{{ test_case_failure_msg }}"
+      tags: TC_007
+
+    - name: Delete the hosts
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+        hosts delete {{ item }} --monitor -f human
+      loop:
+        - "{{ host1 }}"
+        - "{{ host2 }}"
+      changed_when: false
+      tags: TC_007
+
+    - name: Delete the inventory file
+      ignore_errors: yes
+      file:
+        state: absent
+        path: testinventory.yml
+      tags: TC_007
+
+    - name: Create inventory file if it doesn't exist
+      ignore_errors: yes
+      file:
+        path: "testinventory.yml"
+        state: touch
+        mode: '{{ file_permission }}'
+      tags: TC_007
+
+# Testcase OMNIA_CRM_US_AWXD_TC_008
+# Test case to validate whether the skip tags validation is passed when kubernetes tag is given
+- name: OMNIA_CRM_US_AWXD_TC_008
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/web_ui/vars/main.yml
+    - ../roles/common/vars/main.yml
+    - test_vars/test_omnia_vars.yml
+  tasks:
+    - name: Check input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_008
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_filename }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_008
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_008
+
+    - name: Creating inventory file with hosts associated with the groups
+      copy:
+        dest: "testinventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+          ---
+          manager:
+            hosts:
+              {{ host1 }}
+
+          compute:
+            hosts:
+              {{ host2 }}
+      tags: TC_008
+
+    - name: Push the inventory to AWX
+      shell: |
+        set -o pipefail
+        docker exec awx_task awx-manage inventory_import --inventory-name {{ omnia_inventory_name }} --source "{{ inventory_path }}/testinventory.yml"
+      changed_when: false
+      tags: TC_008
+
+    - block:
+        - name: Launch the job template
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+            job_templates launch "{{ omnia_template_name }}" --credentials "{{ credential_name }}" --skip_tags kubernetes --monitor -f human
+          changed_when: false
+          register: command_output
+      tags: TC_008
+
+    - name: Validate success message
+      assert:
+        that: "'FAILED!' not in command_output.stdout"
+        success_msg: "{{ test_case_success_msg }}"
+        fail_msg: "{{ test_case_failure_msg }}"
+      tags: TC_008
+
+    - name: Delete the hosts
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+        hosts delete {{ item }} --monitor -f human
+      loop:
+        - "{{ host1 }}"
+        - "{{ host2 }}"
+      changed_when: false
+      tags: TC_008
+
+    - name: Delete the inventory file
+      ignore_errors: yes
+      file:
+        state: absent
+        path: testinventory.yml
+      tags: TC_008
+
+    - name: Create inventory file if it doesn't exist
+      ignore_errors: yes
+      file:
+        path: "testinventory.yml"
+        state: touch
+        mode: '{{ file_permission }}'
+      tags: TC_008
+
+# Testcase OMNIA_CRM_US_AWXD_TC_009
+# Test case to validate whether the proper error message is displayed when no host is added to manager group
+- name: OMNIA_CRM_US_AWXD_TC_009
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/web_ui/vars/main.yml
+    - ../roles/common/vars/main.yml
+    - test_vars/test_omnia_vars.yml
+  tasks:
+    - name: Check input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_009
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_filename }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_009
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_009
+
+    - name: Creating inventory file with hosts associated with the groups
+      copy:
+        dest: "testinventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+          ---
+          manager:
+            hosts:
+
+          compute:
+            hosts:
+              {{ host2 }}
+      tags: TC_009
+
+    - name: Push the inventory to AWX
+      shell: |
+        set -o pipefail
+        docker exec awx_task awx-manage inventory_import --inventory-name {{ omnia_inventory_name }} --source "{{ inventory_path }}/testinventory.yml"
+      changed_when: false
+      tags: TC_009
+
+    - block:
+        - name: Launch the job template
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+            job_templates launch "{{ omnia_template_name }}" --credentials "{{ credential_name }}" --monitor -f human
+          changed_when: false
+          register: command_output
+
+      rescue:
+        - name: Validate error message
+          assert:
+            that: "'FAILED!' in command_output.stdout"
+            success_msg: "{{ test_case_success_msg }}"
+            fail_msg: "{{ test_case_failure_msg }}"
+      tags: TC_009
+
+    - name: Delete the hosts
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+        hosts delete {{ host2 }} --monitor -f human
+      changed_when: false
+      tags: TC_009
+
+    - name: Delete the inventory file
+      ignore_errors: yes
+      file:
+        state: absent
+        path: testinventory.yml
+      tags: TC_009
+
+    - name: Create inventory file if it doesn't exist
+      ignore_errors: yes
+      file:
+        path: "testinventory.yml"
+        state: touch
+        mode: '{{ file_permission }}'
+      tags: TC_009
+
+# Testcase OMNIA_CRM_US_AWXD_TC_010
+# Test case to verify whether the manager group validation is passed when a single host is present
+- name: OMNIA_CRM_US_AWXD_TC_010
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/web_ui/vars/main.yml
+    - ../roles/common/vars/main.yml
+    - test_vars/test_omnia_vars.yml
+  tasks:
+    - name: Check input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_010
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_filename }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_010
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_010
+
+    - name: Creating inventory file with hosts associated with the groups
+      copy:
+        dest: "testinventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+          ---
+          manager:
+            hosts:
+              {{ host1 }}
+
+          compute:
+            hosts:
+              {{ host2 }}
+      tags: TC_010
+
+    - name: Push the inventory to AWX
+      shell: |
+        set -o pipefail
+        docker exec awx_task awx-manage inventory_import --inventory-name {{ omnia_inventory_name }} --source "{{ inventory_path }}/testinventory.yml"
+      changed_when: false
+      tags: TC_010
+
+    - block:
+        - name: Launch the job template
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+            job_templates launch "{{ omnia_template_name }}" --credentials "{{ credential_name }}" --monitor -f human
+          changed_when: false
+          register: command_output
+      tags: TC_010
+
+    - name: Validate success message
+      assert:
+        that: "'FAILED!' not in command_output.stdout"
+        success_msg: "{{ test_case_success_msg }}"
+        fail_msg: "{{ test_case_failure_msg }}"
+      tags: TC_010
+
+    - name: Delete the hosts
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+        hosts delete {{ item }} --monitor -f human
+      loop:
+        - "{{ host1 }}"
+        - "{{ host2 }}"
+      changed_when: false
+      tags: TC_010
+
+    - name: Delete the inventory file
+      ignore_errors: yes
+      file:
+        state: absent
+        path: testinventory.yml
+      tags: TC_010
+
+    - name: Create inventory file if it doesn't exist
+      ignore_errors: yes
+      file:
+        path: "testinventory.yml"
+        state: touch
+        mode: '{{ file_permission }}'
+      tags: TC_010
+
+# Testcase OMNIA_CRM_US_AWXD_TC_011
+# Test case to validate that the proper error message is displayed when no host is added to the compute group
+- name: OMNIA_CRM_US_AWXD_TC_011
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/web_ui/vars/main.yml
+    - ../roles/common/vars/main.yml
+    - test_vars/test_omnia_vars.yml
+  tasks:
+    - name: Check input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_011
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_filename }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_011
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_011
+
+    - name: Create inventory file with hosts associated with the groups
+      copy:
+        dest: "testinventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+          ---
+          manager:
+            hosts:
+              {{ host3 }}
+
+          compute:
+            hosts:
+            
+      tags: TC_011
+
+    - name: Push the inventory to AWX
+      shell: |
+        set -o pipefail
+        docker exec awx_task awx-manage inventory_import --inventory-name {{ omnia_inventory_name }} --source "{{ inventory_path }}/testinventory.yml"
+      changed_when: false
+      tags: TC_011
+
+    - block:
+        - name: Launch the job template
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+            job_templates launch "{{ omnia_template_name }}" --credentials "{{ credential_name }}" --monitor -f human
+          changed_when: false
+          register: command_output
+
+      rescue:
+        - name: Validate error message
+          assert:
+            that: "'FAILED!' in command_output.stdout"
+            success_msg: "{{ test_case_success_msg }}"
+            fail_msg: "{{ test_case_failure_msg }}"
+      tags: TC_011
+
+    - name: Delete the hosts
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+        hosts delete {{ host3 }} --monitor -f human
+      changed_when: false
+      tags: TC_011
+
+    - name: Delete the inventory file
+      file:
+        state: absent
+        path: testinventory.yml
+      ignore_errors: yes
+      tags: TC_011
+
+    - name: Create inventory file if it doesn't exist
+      file:
+        path: "testinventory.yml"
+        mode: '{{ file_permission }}'
+        state: touch
+      ignore_errors: yes
+      tags: TC_011
+
+# Testcase OMNIA_CRM_US_AWXD_TC_012
+# Test case to verify that the compute group validation passes when more than one host is present
+- name: OMNIA_CRM_US_AWXD_TC_012
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/web_ui/vars/main.yml
+    - ../roles/common/vars/main.yml
+    - test_vars/test_omnia_vars.yml
+  tasks:
+    - name: Check input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_012
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_filename }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_012
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_012
+
+    - name: Create inventory file with hosts associated with the groups
+      copy:
+        dest: "testinventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+          ---
+          manager:
+            hosts:
+              {{ host1 }}
+
+          compute:
+            hosts:
+              {{ host2 }}
+              {{ host3 }}
+      tags: TC_012
+
+    - name: Push the inventory to AWX
+      shell: |
+        set -o pipefail
+        docker exec awx_task awx-manage inventory_import --inventory-name {{ omnia_inventory_name }} --source "{{ inventory_path }}/testinventory.yml"
+      changed_when: false
+      tags: TC_012
+
+    - block:
+        - name: Launch the job template
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+            job_templates launch "{{ omnia_template_name }}" --credentials "{{ credential_name }}" --monitor -f human
+          changed_when: false
+          register: command_output
+      tags: TC_012
+
+    - name: Validate success message
+      assert:
+        that: "'FAILED!' not in command_output.stdout"
+        success_msg: "{{ test_case_success_msg }}"
+        fail_msg: "{{ test_case_failure_msg }}"
+      tags: TC_012
+
+    - name: Delete the hosts
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+        hosts delete {{ item }} --monitor -f human
+      with_items:
+        - "{{ host1 }}"
+        - "{{ host2 }}"
+        - "{{ host3 }}"
+      changed_when: false
+      tags: TC_012
+
+    - name: Delete the inventory file
+      ignore_errors: yes
+      file:
+        state: absent
+        path: testinventory.yml
+      tags: TC_012
+
+    - name: Create inventory file if it doesn't exist
+      ignore_errors: yes
+      file:
+        path: "testinventory.yml"
+        state: touch
+        mode: '{{ file_permission }}'
+      tags: TC_012
+
+# Testcase OMNIA_CRM_US_AWXD_TC_013
+# Test case to validate the error message when a host is present in both the manager and compute groups
+- name: OMNIA_CRM_US_AWXD_TC_013
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/web_ui/vars/main.yml
+    - ../roles/common/vars/main.yml
+    - test_vars/test_omnia_vars.yml
+  tasks:
+    - name: Check input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_013
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_filename }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_013
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_013
+
+    - name: Create inventory file with hosts associated with the groups
+      copy:
+        dest: "testinventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+          ---
+          manager:
+            hosts:
+              {{ host1 }}
+
+          compute:
+            hosts:
+              {{ host1 }}
+      tags: TC_013
+
+    - name: Push the inventory to AWX
+      shell: |
+        set -o pipefail
+        docker exec awx_task awx-manage inventory_import --inventory-name {{ omnia_inventory_name }} --source "{{ inventory_path }}/testinventory.yml"
+      changed_when: false
+      tags: TC_013
+
+    - block:
+        - name: Launch the job template
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+            job_templates launch "{{ omnia_template_name }}" --credentials "{{ credential_name }}" --monitor -f human
+          changed_when: false
+          register: command_output
+
+      rescue:
+        - name: Validate error message
+          assert:
+            that: "'FAILED!' in command_output.stdout"
+            success_msg: "{{ test_case_success_msg }}"
+            fail_msg: "{{ test_case_failure_msg }}"
+      tags: TC_013
+
+    - name: Delete the hosts
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+        hosts delete {{ host1 }} --monitor -f human
+      changed_when: false
+      tags: TC_013
+
+    - name: Delete the inventory file
+      ignore_errors: yes
+      file:
+        state: absent
+        path: testinventory.yml
+      tags: TC_013
+
+    - name: Create inventory file if it doesn't exist
+      ignore_errors: yes
+      file:
+        path: "testinventory.yml"
+        state: touch
+        mode: '{{ file_permission }}'
+      tags: TC_013
+
+# Testcase OMNIA_CRM_US_AWXD_TC_014
+# Test case to verify that validation passes when the manager and compute hosts are disjoint
+- name: OMNIA_CRM_US_AWXD_TC_014
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/web_ui/vars/main.yml
+    - ../roles/common/vars/main.yml
+    - test_vars/test_omnia_vars.yml
+  tasks:
+    - name: Check input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_014
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_filename }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_014
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_014
+
+    - name: Create inventory file with hosts associated with the groups
+      copy:
+        dest: "testinventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+          ---
+          manager:
+            hosts:
+              {{ host1 }}
+
+          compute:
+            hosts:
+              {{ host2 }}
+      tags: TC_014
+
+    - name: Push the inventory to AWX
+      shell: |
+        set -o pipefail
+        docker exec awx_task awx-manage inventory_import --inventory-name {{ omnia_inventory_name }} --source "{{ inventory_path }}/testinventory.yml"
+      changed_when: false
+      tags: TC_014
+
+    - block:
+        - name: Launch the job template
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+            job_templates launch "{{ omnia_template_name }}" --credentials "{{ credential_name }}" --monitor -f human
+          changed_when: false
+          register: command_output
+      tags: TC_014
+
+    - name: Validate success message
+      assert:
+        that: "'FAILED!' not in command_output.stdout"
+        success_msg: "{{ test_case_success_msg }}"
+        fail_msg: "{{ test_case_failure_msg }}"
+      tags: TC_014
+
+    - name: Delete the hosts
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+        hosts delete {{ item }} --monitor -f human
+      with_items:
+        - "{{ host1 }}"
+        - "{{ host2 }}"
+      changed_when: false
+      tags: TC_014
+
+    - name: Delete the inventory file
+      ignore_errors: yes
+      file:
+        state: absent
+        path: testinventory.yml
+      tags: TC_014
+
+    - name: Create inventory file if it doesn't exist
+      ignore_errors: yes
+      file:
+        path: "testinventory.yml"
+        state: touch
+        mode: '{{ file_permission }}'
+      tags: TC_014
+
+# Testcase OMNIA_CRM_US_AWXD_TC_015
+# Test case to validate that the proper error message is displayed when more than one host is added to the manager group
+- name: OMNIA_CRM_US_AWXD_TC_015
+  hosts: localhost
+  connection: local
+  vars_files:
+    - ../roles/web_ui/vars/main.yml
+    - ../roles/common/vars/main.yml
+    - test_vars/test_omnia_vars.yml
+  tasks:
+    - name: Check input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_015
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_filename }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_015
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_015
+
+    - name: Create inventory file with hosts associated with the groups
+      copy:
+        dest: "testinventory.yml"
+        mode: '{{ file_permission }}'
+        content: |
+          ---
+          compute:
+            hosts:
+              {{ host1 }}
+
+          manager:
+            hosts:
+              {{ host2 }}
+              {{ host3 }}
+
+      tags: TC_015
+
+    - name: Push the inventory to AWX
+      shell: |
+        set -o pipefail
+        docker exec awx_task awx-manage inventory_import --inventory-name {{ omnia_inventory_name }} --source "{{ inventory_path }}/testinventory.yml"
+      changed_when: false
+      tags: TC_015
+
+    - block:
+        - name: Launch the job template
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+            job_templates launch "{{ omnia_template_name }}" --credentials "{{ credential_name }}" --monitor -f human
+          changed_when: false
+          register: command_output
+
+      rescue:
+        - name: Validate error message
+          assert:
+            that: "'FAILED!' in command_output.stdout"
+            success_msg: "{{ test_case_success_msg }}"
+            fail_msg: "{{ test_case_failure_msg }}"
+      tags: TC_015
+
+    - name: Delete the hosts
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ awx_password }}" --conf.insecure
+        hosts delete {{ item }} --monitor -f human
+      with_items:
+        - "{{ host1 }}"
+        - "{{ host2 }}"
+        - "{{ host3 }}"
+      changed_when: false
+      tags: TC_015
+
+    - name: Delete the inventory file
+      ignore_errors: yes
+      file:
+        state: absent
+        path: testinventory.yml
+      tags: TC_015
+
+    - name: Create inventory file if it doesn't exist
+      ignore_errors: yes
+      file:
+        path: "testinventory.yml"
+        state: touch
+        mode: '{{ file_permission }}'
+      tags: TC_015
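
Several of the cases above hinge on whether the generated testinventory.yml is structurally valid before it ever reaches AWX. A quick local parse with the standard ansible-inventory CLI can catch a malformed file earlier in the run; the task below is only a sketch and is not part of this commit.

# Sketch only: parse the generated inventory locally before importing it into AWX.
- name: Validate that testinventory.yml parses as an inventory
  command: ansible-inventory -i testinventory.yml --list
  register: inventory_parse
  changed_when: false
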

+ 365 - 67
appliance/test/test_provision_cc.yml

@@ -13,37 +13,204 @@
 #  limitations under the License.
 ---
 
-# Testcase OMNIA_DIO_US_CC_TC_010
+# Testcase OMNIA_DIO_US_CC_TC_004
 # Execute provision role in management station and verify cobbler configuration
-- name: OMNIA_DIO_US_CC_TC_010
+- name: OMNIA_DIO_US_CC_TC_004
   hosts: localhost
   connection: local
   vars_files:
     - test_vars/test_provision_vars.yml
     - ../roles/provision/vars/main.yml
   tasks:
+    - name: Check the iso file is present
+      stat:
+        path: "{{ iso_file_path }}/{{ iso_name }}"
+      register: iso_status
+      tags: TC_004
+
+    - name: Fail if iso file is missing
+      fail:
+        msg: "{{ iso_fail }}"
+      when: iso_status.stat.exists == false
+      tags: TC_004
+
     - name: Delete the cobbler container if exits
       docker_container:
         name: "{{ docker_container_name }}"
         state: absent
-      tags: TC_010
+      tags: TC_004
 
     - name: Delete docker image if exists
       docker_image:
         name: "{{ docker_image_name }}"
         tag: "{{ docker_image_tag }}"
         state: absent
-      tags: TC_010
+      tags: TC_004
 
     - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
         - name: Call provision role
           include_role:
             name: ../roles/provision
+      tags: TC_004
+
+    - name: Check that the connection to cobbler UI returns status 200
+      uri:
+        url: https://localhost/cobbler_web
+        status_code: 200
+        return_content: yes
+        validate_certs: no
+      tags: TC_004,VERIFY_004
+
+    - name: Fetch cobbler version in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler version
+      changed_when: false
+      register: cobbler_version
+      tags: TC_004,VERIFY_004
+
+    - name: Verify cobbler version
+      assert:
+        that:
+          - "'Cobbler' in cobbler_version.stdout"
+          - "'Error' not in cobbler_version.stdout"
+        fail_msg: "{{ cobbler_version_fail_msg }}"
+        success_msg: "{{ cobbler_version_success_msg }}"
+      tags: TC_004,VERIFY_004
+
+    - name: Run cobbler check command in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler check
+      changed_when: false
+      register: cobbler_check
+      tags: TC_004,VERIFY_004
+
+    - name: Verify cobbler check command output
+      assert:
+        that:
+          - "'The following are potential configuration items that you may want to fix' not in cobbler_check.stdout"
+          - "'Error' not in cobbler_check.stdout"
+        fail_msg: "{{ cobbler_check_fail_msg }}"
+        success_msg: "{{ cobbler_check_success_msg }}"
+      ignore_errors: yes
+      tags: TC_004,VERIFY_004
+
+    - name: Run cobbler sync command in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler sync
+      changed_when: false
+      register: cobbler_sync
+      tags: TC_004,VERIFY_004
+
+    - name: Verify cobbler sync command output
+      assert:
+        that:
+          - "'TASK COMPLETE' in cobbler_sync.stdout"
+          - "'Fail' not in cobbler_sync.stdout"
+          - "'Error' not in cobbler_sync.stdout"
+        fail_msg: "{{ cobbler_sync_fail_msg }}"
+        success_msg: "{{ cobbler_sync_success_msg }}"
+      tags: TC_004,VERIFY_004
+
+    - name: Fetch cobbler distro list
+      command: docker exec {{ docker_container_name }} cobbler distro list
+      changed_when: false
+      register: cobbler_distro_list
+      tags: TC_004,VERIFY_004
+
+    - name: Verify cobbler distro list
+      assert:
+        that:
+          - "'CentOS' in cobbler_distro_list.stdout"
+        fail_msg: "{{ cobbler_distro_list_fail_msg }}"
+        success_msg: "{{ cobbler_distro_list_success_msg }}"
+      tags: TC_004,VERIFY_004
+
+    - name: Fetch cobbler profile list
+      command: docker exec cobbler cobbler profile list
+      changed_when: false
+      register: cobbler_profile_list
+      tags: TC_004,VERIFY_004
+
+    - name: Verify cobbler profile list
+      assert:
+        that:
+          - "'CentOS' in cobbler_profile_list.stdout"
+        fail_msg: "{{ cobbler_profile_list_fail_msg }}"
+        success_msg: "{{ cobbler_profile_list_success_msg }}"
+      tags: TC_004,VERIFY_004
+
+    - name: Check kickstart file
+      shell: |
+        docker exec {{ docker_container_name }} [ -f /var/lib/cobbler/kickstarts/{{ kickstart_filename }} ] && echo "File exist" || echo "File does not exist"
+      changed_when: false
+      register: kickstart_file_status
+      tags: TC_004,VERIFY_004
+
+    - name: Verify kickstart file present
+      assert:
+        that:
+          - "'File exist' in kickstart_file_status.stdout"
+        fail_msg: "{{ kickstart_file_fail_msg }}"
+        success_msg: "{{ kickstart_file_success_msg }}"
+      tags: TC_004,VERIFY_004
+
+    - name: Check crontab list
+      command: docker exec cobbler crontab -l
+      changed_when: false
+      register: crontab_list
+      tags: TC_004,VERIFY_004
+
+    - name: Verify crontab list
+      assert:
+        that:
+          - "'* * * * * ansible-playbook /root/tftp.yml' in crontab_list.stdout"
+          - "'5 * * * * ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"
+        fail_msg: "{{ crontab_list_fail_msg }}"
+        success_msg: "{{ crontab_list_success_msg }}"
+      tags: TC_004,VERIFY_004
+
+    - name: Check tftp, dhcpd, xinetd and cobblerd services are running
+      command: docker exec cobbler systemctl is-active {{ item }}
+      changed_when: false
+      ignore_errors: yes
+      register: cobbler_service_check
+      with_items: "{{ cobbler_services }}"
+      tags: TC_004,VERIFY_004
+
+    - name: Verify tftp, dhcpd, xinetd and cobblerd services are running
+      assert:
+        that:
+          - "'active' in cobbler_service_check.results[{{ item }}].stdout"
+          - "'inactive' not in cobbler_service_check.results[{{ item }}].stdout"
+          - "'unknown' not in cobbler_service_check.results[{{ item }}].stdout"
+        fail_msg: "{{ cobbler_service_check_fail_msg }}"
+        success_msg: "{{ cobbler_service_check_success_msg }}"
+      with_sequence: start=0 end=3
+      tags: TC_004,VERIFY_004
+
+# Testcase OMNIA_DIO_US_CC_TC_005
+# Execute provision role in management station where the cobbler container is already configured
+- name: OMNIA_DIO_US_CC_TC_005
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
           vars:
-            no_prompt: true
-            admin_password: "{{ boundary_password }}"
-            admin_password_confirm: "{{ boundary_password }}"
-      tags: TC_010
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+      tags: TC_005
 
     - name: Check the connection to cobbler UI and it returns a status 200
       uri:
@@ -51,13 +218,13 @@
         status_code: 200
         return_content: yes
         validate_certs: no
-      tags: TC_010
+      tags: TC_005,VERIFY_005
 
     - name: Fetch cobbler version in cobbler container
       command: docker exec {{ docker_container_name }} cobbler version
       changed_when: false
       register: cobbler_version
-      tags: TC_010
+      tags: TC_005,VERIFY_005
 
     - name: Verify cobbler version
       assert:
@@ -66,13 +233,13 @@
           - "'Error' not in cobbler_version.stdout"
         fail_msg: "{{ cobbler_version_fail_msg }}"
         success_msg: "{{ cobbler_version_success_msg }}"
-      tags: TC_010
+      tags: TC_005,VERIFY_005
 
     - name: Run cobbler check command in cobbler container
       command: docker exec {{ docker_container_name }} cobbler check
       changed_when: false
       register: cobbler_check
-      tags: TC_010
+      tags: TC_005,VERIFY_005
 
     - name: Verify cobbler check command output
       assert:
@@ -82,13 +249,13 @@
         fail_msg: "{{ cobbler_check_fail_msg }}"
         success_msg: "{{ cobbler_check_success_msg }}"
       ignore_errors: yes
-      tags: TC_010
+      tags: TC_005,VERIFY_005
 
     - name: Run cobbler sync command in cobbler container
       command: docker exec {{ docker_container_name }} cobbler sync
       changed_when: false
       register: cobbler_sync
-      tags: TC_010
+      tags: TC_005,VERIFY_005
 
     - name: Verify cobbler sync command output
       assert:
@@ -98,13 +265,13 @@
           - "'Error' not in cobbler_sync.stdout"
         fail_msg: "{{ cobbler_sync_fail_msg }}"
         success_msg: "{{ cobbler_sync_success_msg }}"
-      tags: TC_010
+      tags: TC_005,VERIFY_005
 
     - name: Fetch cobbler distro list
       command: docker exec {{ docker_container_name }} cobbler distro list
       changed_when: false
       register: cobbler_distro_list
-      tags: TC_010
+      tags: TC_005,VERIFY_005
 
     - name: Verify cobbler distro list
       assert:
@@ -112,13 +279,13 @@
           - "'CentOS' in cobbler_distro_list.stdout"
         fail_msg: "{{ cobbler_distro_list_fail_msg }}"
         success_msg: "{{ cobbler_distro_list_success_msg }}"
-      tags: TC_010
+      tags: TC_005,VERIFY_005
 
     - name: Fetch cobbler profile list
       command: docker exec cobbler cobbler profile list
       changed_when: false
       register: cobbler_profile_list
-      tags: TC_010
+      tags: TC_005,VERIFY_005
 
     - name: Verify cobbler profile list
       assert:
@@ -126,14 +293,14 @@
           - "'CentOS' in cobbler_profile_list.stdout"
         fail_msg: "{{ cobbler_profile_list_fail_msg }}"
         success_msg: "{{ cobbler_profile_list_success_msg }}"
-      tags: TC_010
+      tags: TC_005,VERIFY_005
 
     - name: Check kickstart file
       shell: |
         docker exec {{ docker_container_name }} [ -f /var/lib/cobbler/kickstarts/{{ kickstart_filename }} ] && echo "File exist" || echo "File does not exist"
       changed_when: false
       register: kickstart_file_status
-      tags: TC_010
+      tags: TC_005,VERIFY_005
 
     - name: Verify kickstart file present
       assert:
@@ -141,11 +308,45 @@
           - "'File exist' in kickstart_file_status.stdout"
         fail_msg: "{{ kickstart_file_fail_msg }}"
         success_msg: "{{ kickstart_file_success_msg }}"
-      tags: TC_010
+      tags: TC_005,VERIFY_005
+
+    - name: Check crontab list
+      command: docker exec cobbler crontab -l
+      changed_when: false
+      register: crontab_list
+      tags: TC_005,VERIFY_005
+
+    - name: Verify crontab list
+      assert:
+        that:
+          - "'* * * * * ansible-playbook /root/tftp.yml' in crontab_list.stdout"
+          - "'5 * * * * ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"
+        fail_msg: "{{ crontab_list_fail_msg }}"
+        success_msg: "{{ crontab_list_success_msg }}"
+      tags: TC_005,VERIFY_005
+
+    - name: Check tftp, dhcpd, xinetd and cobblerd services are running
+      command: docker exec cobbler systemctl is-active {{ item }}
+      changed_when: false
+      ignore_errors: yes
+      register: cobbler_service_check
+      with_items: "{{ cobbler_services }}"
+      tags: TC_005,VERIFY_005
 
-# Testcase OMNIA_DIO_US_CC_TC_011
+    - name: Verify tftp, dhcpd, xinetd and cobblerd services are running
+      assert:
+        that:
+          - "'active' in cobbler_service_check.results[{{ item }}].stdout"
+          - "'inactive' not in cobbler_service_check.results[{{ item }}].stdout"
+          - "'unknown' not in cobbler_service_check.results[{{ item }}].stdout"
+        fail_msg: "{{ cobbler_service_check_fail_msg }}"
+        success_msg: "{{ cobbler_service_check_success_msg }}"
+      with_sequence: start=0 end=3
+      tags: TC_005,VERIFY_005
+
+# Testcase OMNIA_DIO_US_CC_TC_006
 # Execute provision role in management station where already one container present
-- name: OMNIA_DIO_US_CC_TC_011
+- name: OMNIA_DIO_US_CC_TC_006
   hosts: localhost
   connection: local
   vars_files:
@@ -156,21 +357,21 @@
       docker_container:
         name: "{{ docker_container_name }}"
         state: absent
-      tags: TC_011
+      tags: TC_006
 
     - name: Delete docker image if exists
       docker_image:
         name: "{{ docker_image_name }}"
         tag: "{{ docker_image_tag }}"
         state: absent
-      tags: TC_011
+      tags: TC_006
 
     - name: Create docker image
       docker_image:
         name: ubuntu
         tag: latest
         source: pull
-      tags: TC_011
+      tags: TC_006
 
     - name: Create docker container
       command: docker run -dit ubuntu
@@ -178,17 +379,19 @@
       changed_when: true
       args:
         warn: false
-      tags: TC_011
+      tags: TC_006
 
     - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
         - name: Call provision role
           include_role:
             name: ../roles/provision
-          vars:
-            no_prompt: true
-            admin_password: "{{ boundary_password }}"
-            admin_password_confirm: "{{ boundary_password }}"
-      tags: TC_011
+      tags: TC_006
 
     - name: Check the connection to cobbler UI and it returns a status 200
       uri:
@@ -196,13 +399,13 @@
         status_code: 200
         return_content: yes
         validate_certs: no
-      tags: TC_011
+      tags: TC_006,VERIFY_006
 
     - name: Fetch cobbler version in cobbler container
       command: docker exec {{ docker_container_name }} cobbler version
       changed_when: false
       register: cobbler_version
-      tags: TC_011
+      tags: TC_006,VERIFY_006
 
     - name: Verify cobbler version
       assert:
@@ -211,13 +414,13 @@
           - "'Error' not in cobbler_version.stdout"
         fail_msg: "{{ cobbler_version_fail_msg }}"
         success_msg: "{{ cobbler_version_success_msg }}"
-      tags: TC_011
+      tags: TC_006,VERIFY_006
 
     - name: Run cobbler check command in cobbler container
       command: docker exec {{ docker_container_name }} cobbler check
       changed_when: false
       register: cobbler_check
-      tags: TC_011
+      tags: TC_006,VERIFY_006
 
     - name: Verify cobbler check command output
       assert:
@@ -227,13 +430,13 @@
         fail_msg: "{{ cobbler_check_fail_msg }}"
         success_msg: "{{ cobbler_check_success_msg }}"
       ignore_errors: yes
-      tags: TC_011
+      tags: TC_006,VERIFY_006
 
     - name: Run cobbler sync command in cobbler container
       command: docker exec {{ docker_container_name }} cobbler sync
       changed_when: false
       register: cobbler_sync
-      tags: TC_011
+      tags: TC_006,VERIFY_006
 
     - name: Verify cobbler sync command output
       assert:
@@ -243,13 +446,13 @@
           - "'Error' not in cobbler_sync.stdout"
         fail_msg: "{{ cobbler_sync_fail_msg }}"
         success_msg: "{{ cobbler_sync_success_msg }}"
-      tags: TC_011
+      tags: TC_006,VERIFY_006
 
     - name: Fetch cobbler distro list
       command: docker exec {{ docker_container_name }} cobbler distro list
       changed_when: false
       register: cobbler_distro_list
-      tags: TC_011
+      tags: TC_006,VERIFY_006
 
     - name: Verify cobbler distro list
       assert:
@@ -257,13 +460,13 @@
           - "'CentOS' in cobbler_distro_list.stdout"
         fail_msg: "{{ cobbler_distro_list_fail_msg }}"
         success_msg: "{{ cobbler_distro_list_success_msg }}"
-      tags: TC_011
+      tags: TC_006,VERIFY_006
 
     - name: Fetch cobbler profile list
       command: docker exec cobbler cobbler profile list
       changed_when: false
       register: cobbler_profile_list
-      tags: TC_011
+      tags: TC_006,VERIFY_006
 
     - name: Verify cobbler profile list
       assert:
@@ -271,14 +474,14 @@
           - "'CentOS' in cobbler_profile_list.stdout"
         fail_msg: "{{ cobbler_profile_list_fail_msg }}"
         success_msg: "{{ cobbler_profile_list_success_msg }}"
-      tags: TC_011
+      tags: TC_006,VERIFY_006
 
     - name: Check kickstart file
       shell: |
         docker exec {{ docker_container_name }} [ -f /var/lib/cobbler/kickstarts/{{ kickstart_filename }} ] && echo "File exist" || echo "File does not exist"
       changed_when: false
       register: kickstart_file_status
-      tags: TC_011
+      tags: TC_006,VERIFY_006
 
     - name: Verify kickstart file present
       assert:
@@ -286,23 +489,57 @@
           - "'File exist' in kickstart_file_status.stdout"
         fail_msg: "{{ kickstart_file_fail_msg }}"
         success_msg: "{{ kickstart_file_success_msg }}"
-      tags: TC_011
+      tags: TC_006,VERIFY_006
+
+    - name: Check crontab list
+      command: docker exec cobbler crontab -l
+      changed_when: false
+      register: crontab_list
+      tags: TC_006,VERIFY_006
+
+    - name: Verify crontab list
+      assert:
+        that:
+          - "'* * * * * ansible-playbook /root/tftp.yml' in crontab_list.stdout"
+          - "'5 * * * * ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"
+        fail_msg: "{{ crontab_list_fail_msg }}"
+        success_msg: "{{ crontab_list_success_msg }}"
+      tags: TC_006,VERIFY_006
+
+    - name: Check tftp, dhcpd, xinetd and cobblerd services are running
+      command: docker exec cobbler systemctl is-active {{ item }}
+      changed_when: false
+      ignore_errors: yes
+      register: cobbler_service_check
+      with_items: "{{ cobbler_services }}"
+      tags: TC_006,VERIFY_006
+
+    - name: Verify tftp, dhcpd, xinetd and cobblerd services are running
+      assert:
+        that:
+          - "'active' in cobbler_service_check.results[{{ item }}].stdout"
+          - "'inactive' not in cobbler_service_check.results[{{ item }}].stdout"
+          - "'unknown' not in cobbler_service_check.results[{{ item }}].stdout"
+        fail_msg: "{{ cobbler_service_check_fail_msg }}"
+        success_msg: "{{ cobbler_service_check_success_msg }}"
+      with_sequence: start=0 end=3
+      tags: TC_006,VERIFY_006
 
     - name: Delete the ubuntu container
       docker_container:
         name: "{{ create_docker_container.stdout }}"
         state: absent
-      tags: TC_011
+      tags: TC_006
 
     - name: Delete the ubuntu umage
       docker_image:
         name: ubuntu
         state: absent
-      tags: TC_011
+      tags: TC_006
 
-# Testcase OMNIA_DIO_US_CC_TC_012
+# Testcase OMNIA_DIO_US_CC_TC_007
 # Execute provision role in management station and reboot management station
-- name: OMNIA_DIO_US_CC_TC_012
+- name: OMNIA_DIO_US_CC_TC_007
   hosts: localhost
   connection: local
   vars_files:
@@ -310,54 +547,115 @@
     - ../roles/provision/vars/main.yml
   tasks:
     - name: Check last uptime of the server
-      shell: |
-        current_time=$(date +"%Y-%m-%d %H")
-        uptime -s | grep "$current_time"
+      command: uptime -s
       register: uptime_status
       changed_when: false
       ignore_errors: yes
-      tags: TC_012
+      tags: TC_007
+
+    - name: Check current date
+      command: date +"%Y-%m-%d %H"
+      register: current_time
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_007
 
     - name: Delete the cobbler container if exits
       docker_container:
         name: "{{ docker_container_name }}"
         state: absent
-      when: uptime_status.stdout|length < 1
-      tags: TC_012
+      when: current_time.stdout not in uptime_status.stdout
+      tags: TC_007
 
     - name: Delete docker image if exists
       docker_image:
         name: "{{ docker_image_name }}"
         tag: "{{ docker_image_tag }}"
         state: absent
-      when: uptime_status.stdout|length < 1
-      tags: TC_012
+      when: current_time.stdout not in uptime_status.stdout
+      tags: TC_007
 
     - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
         - name: Call provision role
           include_role:
             name: ../roles/provision
-          vars:
-            no_prompt: true
-            admin_password: "{{ boundary_password }}"
-            admin_password_confirm: "{{ boundary_password }}"
-      when: uptime_status.stdout|length < 1
-      tags: TC_012
+      when: current_time.stdout not in uptime_status.stdout
+      tags: TC_007
 
     - name: Reboot localhost
       command: reboot
-      when: uptime_status.stdout|length < 1
-      tags: TC_012
+      when: current_time.stdout not in uptime_status.stdout
+      tags: TC_007
 
     - name: Inspect cobbler container
       docker_container_info:
         name: "{{ docker_container_name }}"
       register: cobbler_cnt_status
-      tags: TC_012
+      tags: TC_007,VERIFY_007
 
     - name: Verify cobbler container is running after reboot
       assert:
         that: "'running' in cobbler_cnt_status.container.State.Status"
         fail_msg: "{{ cobbler_reboot_fail_msg }}"
         success_msg: "{{ cobbler_reboot_success_msg }}"
-      tags: TC_012
+      tags: TC_007,VERIFY_007
+
+# Testcase OMNIA_DIO_US_CC_TC_008
+# Execute provision role in management station with the CentOS ISO file not present in the files folder of the provision role
+- name: OMNIA_DIO_US_CC_TC_008
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Check the iso file is present
+      stat:
+        path: "{{ iso_file_path }}/{{ iso_name }}"
+      register: iso_status
+      tags: TC_008
+
+    - name: Copy iso file to different name
+      copy:
+        src: "{{ iso_file_path }}/{{ iso_name }}"
+        dest: "{{ iso_file_path }}/{{ temp_iso_name }}"
+      when: iso_status.stat.exists == true
+      tags: TC_008
+
+    - name: Delete iso file
+      file:
+        path: "{{ iso_file_path }}/{{ iso_name }}"
+        state: "absent"
+      when: iso_status.stat.exists == true
+      tags: TC_008
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+      rescue:
+        - name: Validate iso missing error
+          assert:
+            that: iso_fail in iso_file_check.msg
+            success_msg: "{{ iso_check_success_msg }}"
+            fail_msg: "{{ iso_check_fail_msg }}"
+      tags: TC_008
+
+    - name: Copy iso file to old name
+      copy:
+        src: "{{ iso_file_path }}/{{ temp_iso_name }}"
+        dest: "{{ iso_file_path }}/{{ iso_name }}"
+      when: iso_status.stat.exists == true
+      tags: TC_008
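
The service checks in TC_004 through TC_006 above register one result per service and then assert over positional indexes with with_sequence. An equivalent assertion can loop over the registered results directly, which avoids keeping the two loops in step; the task below is only a sketch built on the same cobbler_service_check register and message variables used above.

# Sketch only: assert over the registered results instead of re-indexing with with_sequence.
- name: Verify tftp, dhcpd, xinetd and cobblerd services are running
  assert:
    that:
      - "'active' in item.stdout"
      - "'inactive' not in item.stdout"
      - "'unknown' not in item.stdout"
    fail_msg: "{{ cobbler_service_check_fail_msg }}"
    success_msg: "{{ cobbler_service_check_success_msg }}"
  with_items: "{{ cobbler_service_check.results }}"
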

+ 75 - 350
appliance/test/test_provision_cdip.yml

@@ -14,7 +14,7 @@
 ---
 
 # Testcase OMNIA_DIO_US_CDIP_TC_001
-# Execute provison role in management station with cobbler as empty
+# Execute provision role in management station with CentOS 7 installed
 - name: OMNIA_DIO_US_CDIP_TC_001
   hosts: localhost
   connection: local
@@ -36,320 +36,25 @@
       tags: TC_001
 
     - block:
-        - name: Test cobbler password with empty string
+        - name: Call common role
           include_role:
-            name: ../roles/provision
-            tasks_from: "{{ item }}"
-          with_items:
-           - "{{ cobbler_image_files }}"
-          vars:
-            no_prompt: true
-            admin_password: "{{ empty_password }}"
-            admin_password_confirm: "{{ empty_password }}"
-      rescue:
-        - name: Validate failure message
-          assert:
-            that: fail_msg_pwd_format in msg_pwd_format.msg
-            success_msg: "{{ validate_password_success_msg }}"
-            fail_msg: "{{ validate_password_fail_msg }}"
-      tags: TC_001
-
-# Testcase OMNIA_DIO_US_CDIP_TC_002
-# Execute provison role in management station with cobbler password of length 8 characters
-- name: OMNIA_DIO_US_CDIP_TC_002
-  hosts: localhost
-  connection: local
-  vars_files:
-    - test_vars/test_provision_vars.yml
-    - ../roles/provision/vars/main.yml
-  tasks:
-    - name: Delete the cobbler container if exits
-      docker_container:
-        name: "{{ docker_container_name }}"
-        state: absent
-      tags: TC_002
-
-    - name: Delete docker image if exists
-      docker_image:
-        name: "{{ docker_image_name }}"
-        tag: "{{ docker_image_tag }}"
-        state: absent
-      tags: TC_002
-
-    - block:
-        - name: Test cobbler password with 8 characters
-          include_role:
-            name: ../roles/provision
-            tasks_from: "{{ item }}"
-          with_items:
-           - "{{ cobbler_image_files }}"
-          vars:
-            no_prompt: true
-            admin_password: "{{ boundary_password }}"
-            admin_password_confirm: "{{ boundary_password }}"
-      always:
-        - name: Validate success message
-          assert:
-            that:  success_msg_pwd_format in msg_pwd_format.msg
-            success_msg: "{{ validate_password_success_msg }}"
-            fail_msg: "{{ validate_password_fail_msg }}"
-      tags: TC_002
-
-# Testcase OMNIA_DIO_US_CDIP_TC_003
-# Execute provison role in management station with cobbler password of length greather than 15 characters
-- name: OMNIA_DIO_US_CDIP_TC_003
-  hosts: localhost
-  connection: local
-  vars_files:
-    - test_vars/test_provision_vars.yml
-    - ../roles/provision/vars/main.yml
-  tasks:
-    - name: Delete the cobbler container if exits
-      docker_container:
-        name: "{{ docker_container_name }}"
-        state: absent
-      tags: TC_003
-
-    - name: Delete docker image if exists
-      docker_image:
-        name: "{{ docker_image_name }}"
-        tag: "{{ docker_image_tag }}"
-        state: absent
-      tags: TC_003
-
-    - block:
-        - name: Test cobbler password with lengthy string
-          include_role:
-             name: ../roles/provision
-             tasks_from: "{{ item }}"
-          with_items:
-           - "{{ cobbler_image_files }}"
-          vars:
-            no_prompt: true
-            admin_password: "{{ lengthy_password }}"
-            admin_password_confirm: "{{ lengthy_password }}"
-      always:
-        - name: Validate success message
-          assert:
-            that:  success_msg_pwd_format in msg_pwd_format.msg
-            success_msg: "{{ validate_password_success_msg }}"
-            fail_msg: "{{ validate_password_fail_msg }}"
-      tags: TC_003
-
-# Testcase OMNIA_DIO_US_CDIP_TC_004
-# Execute provison role in management station with cobbler password contains white spaces
-- name: OMNIA_DIO_US_CDIP_TC_004
-  hosts: localhost
-  connection: local
-  vars_files:
-    - test_vars/test_provision_vars.yml
-    - ../roles/provision/vars/main.yml
-  tasks:
-    - name: Delete the cobbler container if exits
-      docker_container:
-        name: "{{ docker_container_name }}"
-        state: absent
-      tags: TC_004
-
-    - name: Delete docker image if exists
-      docker_image:
-        name: "{{ docker_image_name }}"
-        tag: "{{ docker_image_tag }}"
-        state: absent
-      tags: TC_004
-
-    - block:
-        - name: Test cobbler password with string contains white space
-          include_role:
-            name: ../roles/provision
-            tasks_from: "{{ item }}"
-          with_items:
-           - "{{ cobbler_image_files }}"
+            name: ../roles/common
           vars:
-            no_prompt: true
-            admin_password: "{{ whitespace_password }}"
-            admin_password_confirm: "{{ whitespace_password }}"
-      always:
-        - name: Validate success message
-          assert:
-            that:  success_msg_pwd_format in msg_pwd_format.msg
-            success_msg: "{{ validate_password_success_msg }}"
-            fail_msg: "{{ validate_password_fail_msg }}"
-      tags: TC_004
+            input_config_filename: "{{ test_input_config_filename }}"
 
-# Testcase OMNIA_DIO_US_CDIP_TC_005
-# Execute provison role in management station with cobbler password as string with special characters
-- name: OMNIA_DIO_US_CDIP_TC_005
-  hosts: localhost
-  connection: local
-  vars_files:
-    - test_vars/test_provision_vars.yml
-    - ../roles/provision/vars/main.yml
-  tasks:
-    - name: Delete the cobbler container if exits
-      docker_container:
-        name: "{{ docker_container_name }}"
-        state: absent
-      tags: TC_005
-
-    - name: Delete docker image if exists
-      docker_image:
-        name: "{{ docker_image_name }}"
-        tag: "{{ docker_image_tag }}"
-        state: absent
-      tags: TC_005
-
-    - block:
-        - name: Test cobbler password with string contains special characters
-          include_role:
-            name: ../roles/provision
-            tasks_from: "{{ item }}"
-          with_items:
-           - "{{ cobbler_image_files }}"
-          vars:
-            no_prompt: true
-            admin_password: "{{ special_character_password }}"
-            admin_password_confirm: "{{ special_character_password }}"
-      always:
-        - name: Validate success message
-          assert:
-            that:  success_msg_pwd_format in msg_pwd_format.msg
-            success_msg: "{{ validate_password_success_msg }}"
-            fail_msg: "{{ validate_password_success_msg }}"
-      tags: TC_005
-
-# Testcase OMNIA_DIO_US_CDIP_TC_006
-# Execute provison role in management station with cobbler password and cobbler password confirm having unequal values
-- name: OMNIA_DIO_US_CDIP_TC_006
-  hosts: localhost
-  connection: local
-  vars_files:
-    - test_vars/test_provision_vars.yml
-    - ../roles/provision/vars/main.yml
-  tasks:
-    - name: Delete the cobbler container if exits
-      docker_container:
-        name: "{{ docker_container_name }}"
-        state: absent
-      tags: TC_006
-
-    - name: Delete docker image if exists
-      docker_image:
-        name: "{{ docker_image_name }}"
-        tag: "{{ docker_image_tag }}"
-        state: absent
-      tags: TC_006
-
-    - block:
-        - name: Test cobbler password with unequal values
-          include_role:
-            name: ../roles/provision
-            tasks_from: "{{ item }}"
-          with_items:
-           - "{{ cobbler_image_files }}"
-          vars:
-            no_prompt: true
-            admin_password: "{{ boundary_password }}"
-            admin_password_confirm: "{{ lengthy_password }}"
-      rescue:
-        - name: Validate failure message
-          assert:
-            that:  fail_msg_pwd_confirm in msg_pwd_confirm.msg
-            success_msg: "{{ validate_password_success_msg }}"
-            fail_msg: "{{ validate_password_success_msg }}"
-      tags: TC_006
-
-# Testcase OMNIA_DIO_US_CDIP_TC_007
-# Execute provison role in management station where docker service not running
-- name: OMNIA_DIO_US_CDIP_TC_007
-  hosts: localhost
-  connection: local
-  vars_files:
-    - test_vars/test_provision_vars.yml
-    - ../roles/provision/vars/main.yml
-  tasks:
-    - name: Delete the cobbler container if exits
-      docker_container:
-        name: "{{ docker_container_name }}"
-        state: absent
-      tags: TC_007
-
-    - name: Delete docker image if exists
-      docker_image:
-        name: "{{ docker_image_name }}"
-        tag: "{{ docker_image_tag }}"
-        state: absent
-      tags: TC_007
-
-    - name: Stop docker service
-      service:
-        name: docker
-        state: stopped
-      tags: TC_007
-
-    - block:
-        - name: Call provision role
-          include_role:
-            name: ../roles/provision
-          vars:
-            no_prompt: true
-            admin_password: "{{ boundary_password }}"
-            admin_password_confirm: "{{ boundary_password }}"
-
-        - name: Docker service stopped usecase fail message
-          fail:
-            msg: "{{ docker_check_fail_msg }}"
-      rescue:
-        - name: Docker service stopped usecase success message
-          debug:
-            msg: "{{ docker_check_success_msg }}"
-      always:
-        - name: Start docker service
-          service:
-            name: docker
-            state: started
-      tags: TC_007
-
-# Testcase OMNIA_DIO_US_CDIP_TC_008
-# Execute provison role in management station with os installed centos 8.2
-- name: OMNIA_DIO_US_CDIP_TC_008
-  hosts: localhost
-  connection: local
-  vars_files:
-    - test_vars/test_provision_vars.yml
-    - ../roles/provision/vars/main.yml
-  tasks:
-    - name: Delete the cobbler container if exits
-      docker_container:
-        name: "{{ docker_container_name }}"
-        state: absent
-      tags: TC_008
-
-    - name: Delete docker image if exists
-      docker_image:
-        name: "{{ docker_image_name }}"
-        tag: "{{ docker_image_tag }}"
-        state: absent
-      tags: TC_008
-
-    - block:
         - name: Call provision role
           include_role:
             name: ../roles/provision
             tasks_from: "{{ item }}"
           with_items:
            - "{{ cobbler_image_files }}"
-          vars:
-            no_prompt: true
-            admin_password: "{{ boundary_password }}"
-            admin_password_confirm: "{{ boundary_password }}"
-      tags: TC_008
+      tags: TC_001
 
     - name: Inspect cobbler docker image
       docker_image_info:
         name: "{{ docker_image_name }}"
       register: cobbler_image_status
-      tags: TC_008
+      tags: TC_001,VERIFY_001
 
     - name: Validate cobbler docker image
       assert:
@@ -357,13 +62,13 @@
           - cobbler_image_status.images
         fail_msg: "{{ cobbler_img_fail_msg }}"
         success_msg: "{{ cobbler_img_success_msg }}"
-      tags: TC_008
+      tags: TC_001,VERIFY_001
 
     - name: Inspect cobbler container
       docker_container_info:
         name: "{{ docker_container_name }}"
       register: cobbler_cnt_status
-      tags: TC_008
+      tags: TC_001,VERIFY_001
 
     - name: Validate cobbler docker container
       assert:
@@ -371,29 +76,11 @@
           - cobbler_cnt_status.exists
         fail_msg: "{{ cobbler_cnt_fail_msg }}"
         success_msg: "{{ cobbler_cnt_success_msg }}"
-      tags: TC_008
+      tags: TC_001,VERIFY_001
 
-    - name: Validate first NIC is not assigned to public internet
-      shell: |
-        set -o pipefail
-        ip route get 8.8.8.8 | awk '{print $5}'
-      register: nic_output
-      args:
-        executable: /bin/bash
-      failed_when: first_nic in nic_output.stdout
-      changed_when: false
-      tags: TC_008
-
-    - name: "Validate NIC-1 is assigned to IP {{ nic1_ip_address }}"
-      assert:
-        that: "'{{ nic1_ip_address }}' in ansible_eno1.ipv4.address"
-        fail_msg: "{{ nic_check_fail_msg }}"
-        success_msg: "{{ nic_check_success_msg }}"
-      tags: TC_008
-
-# Testcase OMNIA_DIO_US_CDIP_TC_009
+# Testcase OMNIA_DIO_US_CDIP_TC_002
 # Execute provison role in management station where cobbler container and image already created
-- name: OMNIA_DIO_US_CDIP_TC_009
+- name: OMNIA_DIO_US_CDIP_TC_002
   hosts: localhost
   connection: local
   vars_files:
@@ -401,21 +88,22 @@
     - ../roles/provision/vars/main.yml
   tasks:
     - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
         - name: Call provision role
           include_role:
             name: ../roles/provision
-          vars:
-            no_prompt: true
-            username: "{{ cobbler_username }}"
-            admin_password: "{{ boundary_password }}"
-            admin_password_confirm: "{{ boundary_password }}"
-      tags: TC_009
+      tags: TC_002
 
     - name: Inspect cobbler docker image
       docker_image_info:
         name: "{{ docker_image_name }}"
       register: cobbler_image_status
-      tags: TC_009
+      tags: TC_002,VERIFY_002
 
     - name: Validate cobbler docker image
       assert:
@@ -423,13 +111,13 @@
           - cobbler_image_status.images
         fail_msg: "{{ cobbler_img_fail_msg }}"
         success_msg: "{{ cobbler_img_success_msg }}"
-      tags: TC_009
+      tags: TC_002,VERIFY_002
 
     - name: Inspect cobbler container
       docker_container_info:
         name: "{{ docker_container_name }}"
       register: cobbler_cnt_status
-      tags: TC_009
+      tags: TC_002,VERIFY_002
 
     - name: Validate cobbler docker container
       assert:
@@ -437,22 +125,59 @@
           - cobbler_cnt_status.exists
         fail_msg: "{{ cobbler_cnt_fail_msg }}"
         success_msg: "{{ cobbler_cnt_success_msg }}"
-      tags: TC_009
+      tags: TC_002,VERIFY_002
 
-    - name: Validate first NIC is not assigned to public internet
-      shell: |
-        set -o pipefail
-        ip route get 8.8.8.8 | awk '{print $5}'
-      register: nic_output
-      args:
-        executable: /bin/bash
-      failed_when: first_nic in nic_output.stdout
-      changed_when: false
-      tags: TC_009
+# Testcase OMNIA_DIO_US_CDIP_TC_003
+# Execute provision role in management station where the docker service is not running
+- name: OMNIA_DIO_US_CDIP_TC_003
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete the cobbler container if exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_003
 
-    - name: "Validate NIC-1 is assigned to IP {{ nic1_ip_address }}"
-      assert:
-        that: "'{{ nic1_ip_address }}' in ansible_eno1.ipv4.address"
-        fail_msg: "{{ nic_check_fail_msg }}"
-        success_msg: "{{ nic_check_success_msg }}"
-      tags: TC_009
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_003
+
+    - name: Stop docker service
+      service:
+        name: docker
+        state: stopped
+      tags: TC_003
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+
+        - name: Docker service stopped usecase success message
+          debug:
+            msg: "{{ docker_check_success_msg }}"
+
+      rescue:
+        - name: Docker service stopped usecase fail message
+          fail:
+            msg: "{{ docker_check_fail_msg }}"
+
+      always:
+        - name: Start docker service
+          service:
+            name: docker
+            state: started
+      tags: TC_003
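
TC_003 is only meaningful if docker is genuinely stopped when the roles run. A guard between the stop task and the block would make that assumption explicit; the snippet below is a sketch using the stock service_facts module, and the failure message is illustrative rather than a variable defined in this commit.

# Sketch only: confirm docker is stopped before exercising the negative path.
- name: Gather service facts
  service_facts:

- name: Ensure docker is stopped before the negative test
  assert:
    that: ansible_facts.services['docker.service'].state != 'running'
    fail_msg: "docker is still running, so the negative test would not be valid"
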

+ 90 - 41
appliance/test/test_provision_ndod.yml

@@ -13,44 +13,61 @@
 #  limitations under the License.
 ---
 
-# OMNIA_DIO_US_NDOD_TC_013
-# Execute provison role in management station and  PXE boot one compute node 
-- name: OMNIA_DIO_US_NDOD_TC_013
+# OMNIA_DIO_US_NDOD_TC_009
+# Execute provision role in management station and PXE boot one compute node
+- name: OMNIA_DIO_US_NDOD_TC_009
   hosts: localhost
   connection: local
   gather_subset:
     - 'min'
   vars_files:
     - test_vars/test_provision_vars.yml
+    - ../roles/common/vars/main.yml
   tasks:
     - name: Set ip address of the compute node
       set_fact:
         single_node_ip_address: "{{ groups[cobbler_groupname][0] }}"
-      tags: TC_013
+      tags: TC_009,VERIFY_009
 
     - name: Delete inventory if exists
       file:
         path: inventory
         state: absent
-      tags: TC_013
+      tags: TC_009,VERIFY_009
+
+    - name: Check input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_009,VERIFY_009
+
+    - name: Decrpyt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_path }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_009,VERIFY_009
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_009,VERIFY_009
 
     - name: Create inventory file
       lineinfile:
         path: inventory
-        line: "{{ single_node_ip_address }} ansible_user=root ansible_password={{ boundary_password }} ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
+        line: "{{ single_node_ip_address }} ansible_user=root ansible_password={{ provision_password }} ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
         create: yes
         mode: '{{ file_permission }}'
-      tags: TC_013
+      tags: TC_009,VERIFY_009
 
     - meta: refresh_inventory
-      tags: TC_013
+      tags: TC_009,VERIFY_009
 
     - name: Validate authentication of username and password
       command: ansible {{ single_node_ip_address }} -m ping -i inventory
       register: validate_login
       changed_when: false
       ignore_errors: yes
-      tags: TC_013
+      tags: TC_009,VERIFY_009
 
     - name: Validate the authentication output
       assert:
@@ -60,31 +77,31 @@
           - "'UNREACHABLE' not in validate_login.stdout"
         fail_msg: "{{ authentication_fail_msg }}"
         success_msg: "{{ authentication_success_msg }}"
-      tags: TC_013
+      tags: TC_009,VERIFY_009
 
     - name: Check hostname
       command: ansible {{ single_node_ip_address }} -m shell -a hostname -i inventory
       register: validate_hostname
       changed_when: false
       ignore_errors: yes
-      tags: TC_013
+      tags: TC_009,VERIFY_009
 
     - name: Validate the hostname
       assert:
         that: "'localhost' not in validate_hostname.stdout"
         fail_msg: "{{ hostname_fail_msg }}"
         success_msg: "{{ hostname_success_msg }}"
-      tags: TC_013
+      tags: TC_009,VERIFY_009
 
     - name: Delete inventory if exists
       file:
         path: inventory
         state: absent
-      tags: TC_013
+      tags: TC_009,VERIFY_009
 
-# OMNIA_DIO_US_NDOD_TC_014
+# OMNIA_DIO_US_NDOD_TC_010
 # Execute provision role in management station and PXE boot two compute nodes
-- name: OMNIA_DIO_US_NDOD_TC_014
+- name: OMNIA_DIO_US_NDOD_TC_010
   hosts: localhost
   connection: local
   gather_subset:
@@ -97,7 +114,23 @@
       file:
         path: inventory
         state: absent
-      tags: TC_014
+      tags: TC_010,VERIFY_010
+
+    - name: Check input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_010,VERIFY_010
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_path }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_010,VERIFY_010
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_010,VERIFY_010
 
     - name: Create inventory file
       lineinfile:
@@ -105,18 +138,18 @@
         line: "[nodes]"
         create: yes
         mode: '{{ file_permission }}'
-      tags: TC_014
+      tags: TC_010,VERIFY_010
 
     - name: Edit inventory file
       lineinfile:
         path: inventory
-        line: "{{ item }} ansible_user=root ansible_password={{ boundary_password }} ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
+        line: "{{ item }} ansible_user=root ansible_password={{ provision_password }} ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
       with_items:
         - "{{ groups[cobbler_groupname] }}"
-      tags: TC_014
+      tags: TC_010,VERIFY_010
 
     - meta: refresh_inventory
-      tags: TC_014
+      tags: TC_010,VERIFY_010
 
     - name: Validate ip address is different for both servers
       assert:
@@ -125,14 +158,14 @@
         success_msg: "{{ ip_address_success_msg }}"
       delegate_to: localhost
       run_once: yes
-      tags: TC_014
+      tags: TC_010,VERIFY_010
 
     - name: Check hostname of both servers
       command: ansible nodes -m shell -a hostname -i inventory
       register: node_hostname
       changed_when: false
       ignore_errors: yes
-      tags: TC_014
+      tags: TC_010,VERIFY_010
 
     - name: Validate hostname is different for both servers
       assert:
@@ -144,7 +177,7 @@
         success_msg: "{{ hostname_success_msg }}"
       delegate_to: localhost
       run_once: yes
-      tags: TC_014
+      tags: TC_010,VERIFY_010
 
     - name: Delete inventory if exists
       file:
@@ -152,11 +185,11 @@
         state: absent
       delegate_to: localhost
       run_once: yes
-      tags: TC_014
+      tags: TC_010,VERIFY_010
 
-# OMNIA_DIO_US_NDOD_TC_015
+# OMNIA_DIO_US_NDOD_TC_011
 # Validate passwordless ssh connection established or not with compute nodes
-- name: OMNIA_DIO_US_NDOD_TC_015
+- name: OMNIA_DIO_US_NDOD_TC_011
   hosts: localhost
   gather_subset:
     - 'min'
@@ -165,11 +198,11 @@
     - ../roles/provision/vars/main.yml
   tasks:
     - name: Validate authentication of username and password
-      command: "ansible {{ cobbler_groupname }} -m ping -i cobbler_inventory"
+      command: "ansible {{ cobbler_groupname }} -m ping -i {{ inventory_file }}"
       register: validate_login
       changed_when: false
       ignore_errors: yes
-      tags: TC_015
+      tags: TC_011,VERIFY_011
 
     - name: Validate the passwordless SSH connection
       assert:
@@ -179,11 +212,11 @@
           - "'UNREACHABLE' not in validate_login.stdout"
         success_msg: "{{ authentication_success_msg }}"
         fail_msg: "{{ authentication_fail_msg }}"
-      tags: TC_015
+      tags: TC_011,VERIFY_011
 
-# OMNIA_DIO_US_NDOD_TC_016
+# OMNIA_DIO_US_NDOD_TC_012
 # Execute provision role in management station and reboot compute node after OS provision again
-- name: OMNIA_DIO_US_NDOD_TC_016
+- name: OMNIA_DIO_US_NDOD_TC_012
   hosts: localhost
   connection: local
   gather_subset:
@@ -194,13 +227,29 @@
     - name: Set ip address of the compute node
       set_fact:
         single_node_ip_address: "{{ groups[cobbler_groupname][0] }}"
-      tags: TC_016
+      tags: TC_012,VERIFY_012
 
     - name: Delete inventory if exists
       file:
         path: inventory
         state: absent
-      tags: TC_016
+      tags: TC_012,VERIFY_012
+
+    - name: Check input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_012,VERIFY_012
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_path }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_012,VERIFY_012
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_012,VERIFY_012
 
     - name: Create inventory file
       lineinfile:
@@ -208,38 +257,38 @@
         line: "[nodes]"
         create: yes
         mode: '{{ file_permission }}'
-      tags: TC_016
+      tags: TC_012,VERIFY_012
 
     - name: Edit inventory file
       lineinfile:
         path: inventory
-        line: "{{ single_node_ip_address }} ansible_user=root ansible_password={{ boundary_password }} ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
-      tags: TC_016
+        line: "{{ single_node_ip_address }} ansible_user=root ansible_password={{ provision_password }} ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
+      tags: TC_012,VERIFY_012
 
     - meta: refresh_inventory
-      tags: TC_016
+      tags: TC_012,VERIFY_012
 
     - name: Reboot servers
       command: ansible nodes -m command -a reboot -i inventory
       ignore_errors: yes
       changed_when: true
-      tags: TC_016
+      tags: TC_012,VERIFY_012
 
     - name: Wait for 10 minutes
       pause:
         minutes: 10
-      tags: TC_016
+      tags: TC_012,VERIFY_012
 
     - name: Check ip address of servers
       command: ansible nodes -m command -a 'ip a' -i inventory
       ignore_errors: yes
       changed_when: false
       register: ip_address_after_reboot
-      tags: TC_016
+      tags: TC_012,VERIFY_012
 
     - name: Validate ip address is same after reboot
       assert:
         that: "'{{ single_node_ip_address }}' in ip_address_after_reboot.stdout"
         fail_msg: "{{ ip_address_fail_msg }}"
         success_msg: "{{ ip_address_success_msg }}"
-      tags: TC_016
+      tags: TC_012,VERIFY_012

+ 22 - 11
appliance/test/test_vars/test_common_vars.yml

@@ -14,24 +14,35 @@
 ---
 
 # vars file for test_common.yml file
-docker_volume_fail_msg: "Docker volume omnia-storage does not exist"
-
-docker_volume_success_msg: "Docker volume omnia-storage exists"
-
 centos_version: '7.8'
+test_input_config_filename: "appliance_config_test.yml"
+empty_input_config_filename: "appliance_config_empty.yml"
+new_input_config_filename: "appliance_config_new.yml"
+password_config_file: "password_config"
+min_length_password: "testpass"
+max_length_password: "helloworld123helloworld12hello"
+long_password: "helloworld123hellowordl12hello3"
+white_space_password: "hello world 123"
+special_character_password1: "hello-world/"
+special_character_password2: "hello@$%!world"
+valid_dhcp_start_range: "172.17.0.10"
+valid_dhcp_end_range: "172.17.0.200"
+invalid_dhcp_ip: "1720.1700.1000.1000"
+wrong_dhcp_ip: "d6:dh1:dsj:10"
 
+docker_volume_success_msg: "Docker volume omnia-storage exists"
+docker_volume_fail_msg: "Docker volume omnia-storage does not exist"
+input_config_success_msg: "Input config file is encrypted using ansible-vault successfully"
+input_config_fail_msg: "Input config file failed to encrypt using ansible-vault"
 os_check_success_msg: "OS check passed"
-
 os_check_fail_msg: "OS check failed"
-
 internet_check_success_msg: "Internet connectivity check passed"
-
 internet_check_fail_msg: "Internet connectivity check failed"
-
 different_user_check_success_msg: "Different user execution check passed"
-
 different_user_check_fail_msg: "Different user execution check failed"
-
 selinux_check_success_msg: "selinux check passed"
-
 selinux_check_fail_msg: "selinux check failed"
+input_config_check_success_msg: "appliance_config.yml validation passed"
+input_config_check_fail_msg: "appliance_config.yml validation failed"
+install_package_success_msg: "Installation of package is successful"
+install_package_fail_msg: "Installation of package failed"

+ 24 - 0
appliance/test/test_vars/test_omnia_vars.yml

@@ -0,0 +1,24 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Usage: test_omnia.yml
+host1: "100.10.20.30"
+host2: "100.20.30.40"
+host3: "100.30.40.50"
+inventory_path: "/var/lib/awx/projects/omnia/appliance/test"
+test_input_config_filename: "input_config_test.yml"
+test_case_success_msg: "Test case passed"
+test_case_failure_msg: "Test case failed"
+file_permission: 0644

+ 22 - 9
appliance/test/test_vars/test_provision_vars.yml

@@ -14,11 +14,7 @@
 ---
 
 # Usage: test_provision_cdip.yml
-empty_password: ""
-lengthy_password: "a1b2c3d4e5f6g7h8i9j10k11"
-whitespace_password: "hello world 123"
-special_character_password: "hello@123#%"
-first_nic: "eno1"
+first_nic: "em1"
 nic1_ip_address: 172.17.0.1
 validate_password_success_msg: "Password validation successful"
 validate_password_fail_msg: "Password validation failed"
@@ -29,12 +25,14 @@ cobbler_cnt_success_msg: "Docker container cobbler exists"
 nic_check_fail_msg: "NIC-1 ip address validation failed"
 nic_check_success_msg: "NIC-1 ip address validation successful"
 cobbler_image_files:
- - configure_nic
  - check_prerequisites
  - mount_iso
  - firewall_settings
  - provision_password
+ - dhcp_configure
  - cobbler_image
+password_config_file: "password_config"
+test_input_config_filename: "appliance_config_test.yml"
 
 # Usage: test_provision_cc.yml
 docker_check_success_msg: "Docker service stopped usecase validation successful"
@@ -55,7 +53,20 @@ kickstart_file_fail_msg: "Kickstart file validation failed"
 kickstart_file_success_msg: "Kickstart file validation successful"
 cobbler_reboot_fail_msg: "Cobbler container failed to start after reboot"
 cobbler_reboot_success_msg: "Cobbler container started successfully after reboot"
-kickstart_filename: "centos8.ks"
+crontab_list_fail_msg: "Crontab list validation failed"
+crontab_list_success_msg: "Crontab list validation successful"
+iso_check_fail_msg: "centos iso file check validation failed"
+iso_check_success_msg: "centos iso file check validation successful"
+cobbler_service_check_fail_msg: "cobbler service validation failed"
+cobbler_service_check_success_msg: "cobbler service validation successful"
+kickstart_filename: "centos7.ks"
+iso_file_path: "../roles/provision/files"
+temp_iso_name: "temp_centos.iso"
+cobbler_services:
+ - tftp
+ - dhcpd
+ - cobblerd
+ - xinetd
 
 # Usage: test_provision_cdip.yml, test_provision_cc.yml, test_provision_ndod.yml
 docker_container_name: "cobbler"
@@ -68,5 +79,7 @@ authentication_fail_msg: "Server authentication validation failed"
 authentication_success_msg: "Server authentication validation successful"
 ip_address_fail_msg: "IP address validation failed"
 ip_address_success_msg: "IP address validation successful"
-cobbler_groupname: "cobbler_servers"
-file_permission: 0644
+cobbler_groupname: "all"
+inventory_file: "provisioned_hosts.yml"
+file_permission: 0644
+vault_path: ../roles/common/files/.vault_key

+ 8 - 12
appliance/test/test_vars/test_web_ui_vars.yml

@@ -13,22 +13,17 @@
 #  limitations under the License.
 ---
 
-# Usage: tasks/main.yml
-awx_url: http://localhost
+# Usage: test_web_ui.yml
 return_status: 200
 fail_return_status: -1
-awx_listening_port: 80
+awx_listening_port: 8081
 time: 1
 actual_containers: 4
-empty_password: ""
-boundary_password: "a1b2c3d4"
-lengthy_password: "a1b2c3d4e5f6g7h8i9j10k11"
-unsupported_password: "un supported-password"
 package: "docker-ce"
-awx_exists_msg: "Installation Skipped: AWX instance is already running on your system"
-awx_not_exists_msg: "AWX does not exist"
-validate_password_success_msg: "Password validation succesful"
-validate_password_fail_msg: "Password validation failed"
+awx_exists_msg: "Test case passed: AWX instance is already running on your system"
+awx_not_exists_msg: "Test case failed: AWX does not exist"
+validate_password_success_msg: "Test case passed: Password validation successful"
+validate_password_fail_msg: "Test case failed: Password validation failed"
 resource_exists_success_msg: "Success: Requested resource(s) exists"
 resource_exists_fail_msg: "Failure: Requested resource(s) does not exists"
 compute_group_name: "compute"
@@ -36,4 +31,5 @@ manager_group_name: "manager"
 tower_cli_package_name: "ansible-tower-cli"
 docker_container_name: "awx_web"
 container_up_status_success_msg: "Container is running successfully after the reboot"
-container_up_status_fail_msg: "Container is not running after the reboot"
+container_up_status_fail_msg: "Container is not running after the reboot"
+test_input_config_filename: input_config_test.yml

+ 281 - 169
appliance/test/test_web_ui.yml

@@ -1,159 +1,21 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
 #      http://www.apache.org/licenses/LICENSE-2.0
 #
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 ---
 
 # Testcase OMNIA_CRM_US_AWXD_TC_001
-# Test case to validate the AWX password with empty string
-- name: OMNIA_CRM_US_AWXD_TC_001
-  hosts: localhost
-  connection: local
-  gather_subset:
-    - 'min'
-  vars_files:
-    - ../roles/web_ui/vars/main.yml
-    - ../roles/common/vars/main.yml
-    - test_vars/test_web_ui_vars.yml
-  tasks:
-    - block:
-        - name: Test the awx password with empty string
-          include_tasks: ../roles/web_ui/tasks/awx_password.yml
-          vars:
-            no_prompt: true
-            admin_password: "{{ empty_password }}"
-            admin_password_confirm: "{{ empty_password }}"
-
-      rescue:
-        - name: Validate error message
-          assert:
-            that: fail_msg_pwd_format in msg_pwd_format.msg
-            success_msg: "{{ validate_password_success_msg }}"
-            fail_msg: "{{ validate_password_fail_msg }}"
-      tags: TC_001
-
-# Testcase OMNIA_CRM_US_AWXD_TC_002
-# Test case to validate the AWX password with exactly 8 characters
-- name: OMNIA_CRM_US_AWXD_TC_002
-  hosts: localhost
-  connection: local
-  gather_subset:
-    - 'min'
-  vars_files:
-    - ../roles/web_ui/vars/main.yml
-    - ../roles/common/vars/main.yml
-    - test_vars/test_web_ui_vars.yml
-  tasks:
-    - name: Test the awx password with 8 characters
-      include_tasks: ../roles/web_ui/tasks/awx_password.yml
-      vars:
-        no_prompt: true
-        admin_password: "{{ boundary_password }}"
-        admin_password_confirm: "{{ boundary_password }}"
-      tags: TC_002
-
-    - name: Validate success message
-      assert:
-        that: success_msg_pwd_format in msg_pwd_format.msg
-        success_msg: "{{ validate_password_success_msg }}"
-        fail_msg: "{{ validate_password_fail_msg }}"
-      tags: TC_002
-
-# Testcase OMNIA_CRM_US_AWXD_TC_003
-# Test case to validate the AWX password with length greater than 15 characters
-- name: OMNIA_CRM_US_AWXD_TC_003
-  hosts: localhost
-  connection: local
-  gather_subset:
-    - 'min'
-  vars_files:
-    - ../roles/web_ui/vars/main.yml
-    - ../roles/common/vars/main.yml
-    - test_vars/test_web_ui_vars.yml
-  tasks:
-    - name: Test the awx password with length greater than 15 characters
-      include_tasks: ../roles/web_ui/tasks/awx_password.yml
-      vars:
-        no_prompt: true
-        admin_password: "{{ lengthy_password }}"
-        admin_password_confirm: "{{ lengthy_password }}"
-      tags: TC_003
-
-    - name: Validate success message
-      assert:
-        that: success_msg_pwd_format in msg_pwd_format.msg
-        success_msg: "{{ validate_password_success_msg }}"
-        fail_msg: "{{ validate_password_fail_msg }}"
-      tags: TC_003
-
-# Testcase OMNIA_CRM_US_AWXD_TC_004
-# Test case to validate the AWX password when confirm password is given incorrect
-- name: OMNIA_CRM_US_AWXD_TC_004
-  hosts: localhost
-  connection: local
-  gather_subset:
-    - 'min'
-  vars_files:
-    - ../roles/web_ui/vars/main.yml
-    - ../roles/common/vars/main.yml
-    - test_vars/test_web_ui_vars.yml
-  tasks:
-    - block:
-        - name: Test the awx installation if confirm password is given incorrect
-          include_tasks: ../roles/web_ui/tasks/awx_password.yml
-          vars:
-            no_prompt: true
-            admin_password: "{{ boundary_password }}"
-            admin_password_confirm: "{{ lengthy_password }}"
-
-      rescue:
-        - name: Validate error message
-          assert:
-            that: fail_msg_pwd_confirm in msg_pwd_confirm.msg
-            success_msg: "{{ validate_password_success_msg }}"
-            fail_msg: "{{ validate_password_fail_msg }}"
-      tags: TC_004
-
-# Testcase OMNIA_CRM_US_AWXD_TC_005
-# Test case to validate the AWX password with unsupported strings
-- name: OMNIA_CRM_US_AWXD_TC_005
-  hosts: localhost
-  connection: local
-  gather_subset:
-    - 'min'
-  vars_files:
-    - ../roles/web_ui/vars/main.yml
-    - ../roles/common/vars/main.yml
-    - test_vars/test_web_ui_vars.yml
-  tasks:
-    - block:
-        - name: Test the awx password with unsupported strings
-          include_tasks: ../roles/web_ui/tasks/awx_password.yml
-          vars:
-            no_prompt: true
-            admin_password: "{{ unsupported_password }}"
-            admin_password_confirm: "{{ unsupported_password }}"
-
-      rescue:
-        - name: Validate error message
-          assert:
-            that: fail_msg_pwd_format in msg_pwd_format.msg
-            success_msg: "{{ validate_password_success_msg }}"
-            fail_msg: "{{ validate_password_fail_msg }}"
-      tags: TC_005
-
-# Testcase OMNIA_CRM_US_AWXD_TC_006
 # Test case to verify the prerequisites are installed and execute the AWX deployment
-- name: OMNIA_CRM_US_AWXD_TC_006
+- name: OMNIA_CRM_US_AWXD_TC_001
   hosts: localhost
   connection: local
   gather_subset:
@@ -165,38 +27,44 @@
     - name: Get the docker package facts
       package_facts:
         manager: auto
-      tags: TC_006
+      tags: TC_001
 
     - name: Check if docker-ce is already installed
       debug:
         var: ansible_facts.packages['{{ package }}']
-      tags: TC_006
+      tags: TC_001
 
     - block:
-        - name: Calling the role to be tested
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Calling the web_ui role to be tested
           include_role:
             name: ../roles/web_ui
-      tags: TC_006
+      tags: TC_001
 
     - name: Check that you can connect to github repo and it returns a status 200
       uri:
         url: "{{ awx_git_repo }}"
         status_code: "{{ return_status }}"
         return_content: true
-      tags: TC_006
+      tags: TC_001
 
     - name: Check that you can connect to AWX UI and it returns a status 200
       uri:
-        url: "{{ awx_url }}"
+        url: "{{ awx_ip }}"
         status_code: "{{ return_status }}"
         return_content: true
-      tags: TC_006
+      tags: TC_001
 
-    - name: verify awx-server is listening on 80
+    - name: verify awx-server is listening on 8081
       wait_for:
         port: "{{ awx_listening_port }}"
         timeout: "{{ time }}"
-      tags: TC_006
+      tags: TC_001
 
     - name: Get the containers count
       shell: |
@@ -204,18 +72,18 @@
         docker ps -a | grep awx | wc -l
       register: containers_count
       changed_when: False
-      tags: TC_006
+      tags: TC_001
 
     - name: Validate the containers count
       assert:
         that: containers_count.stdout | int >= actual_containers
         success_msg: "{{ awx_exists_msg }}"
         fail_msg: "{{ awx_not_exists_msg }}"
-      tags: TC_006
+      tags: TC_001
 
-# Testcase OMNIA_CRM_US_AWXD_TC_007
+# Testcase OMNIA_CRM_US_AWXD_TC_002
 # Test case to verify regression testing
-- name: OMNIA_CRM_US_AWXD_TC_007
+- name: OMNIA_CRM_US_AWXD_TC_002
   hosts: localhost
   connection: local
   gather_subset:
@@ -225,30 +93,36 @@
     - test_vars/test_web_ui_vars.yml
   tasks:
     - block:
-        - name: Calling the role to be tested
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Calling the web_ui role to be tested
           include_role:
             name: ../roles/web_ui
-      tags: TC_007
+      tags: TC_002
 
     - name: Check that you can connect to github repo and it returns a status 200
       uri:
         url: "{{ awx_git_repo }}"
         status_code: "{{ return_status }}"
         return_content: true
-      tags: TC_007
+      tags: TC_002
 
     - name: Check that you can connect to AWX UI and it returns a status 200
       uri:
-        url: "{{ awx_url }}"
+        url: "{{ awx_ip }}"
         status_code: "{{ return_status }}"
         return_content: true
-      tags: TC_007
+      tags: TC_002
 
     - name: verify awx-server is listening on 80
       wait_for:
         port: "{{ awx_listening_port }}"
         timeout: "{{ time }}"
-      tags: TC_007
+      tags: TC_002
 
     - name: Get the containers count
       shell: |
@@ -256,11 +130,249 @@
         docker ps -a | grep awx | wc -l
       register: containers_count
       changed_when: False
-      tags: TC_007
+      tags: TC_002
 
     - name: Validate the containers count
       assert:
         that: containers_count.stdout | int >= actual_containers
         success_msg: "{{ awx_exists_msg }}"
         fail_msg: "{{ awx_not_exists_msg }}"
-      tags: TC_007
+      tags: TC_002
+
+# Testcase OMNIA_CRM_US_AWXD_TC_003
+# Test case to validate the AWX configuration
+- name: OMNIA_CRM_US_AWXD_TC_003
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - ../roles/web_ui/vars/main.yml
+    - ../roles/common/vars/main.yml
+    - test_vars/test_web_ui_vars.yml
+  tasks:
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Calling the web_ui role to be tested
+          include_role:
+            name: ../roles/web_ui
+      tags: TC_003
+
+    - name: Get the package facts
+      package_facts:
+        manager: auto
+      tags: TC_003
+
+    - name: Check if ansible-tower-cli is already installed
+      assert:
+        that: "'{{ tower_cli_package_name }}' in ansible_facts.packages"
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+    - name: Get the existing organizations
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        organizations list -f human
+      register: organizations_array
+      changed_when: False
+      tags: TC_003
+
+    - name: Check for organization
+      assert:
+        that: organization_name in organizations_array.stdout
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+    - name: Get the existing projects
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        projects list -f human
+      changed_when: False
+      register: projects_array
+      tags: TC_003
+
+    - name: Check for project
+      assert:
+        that: project_name in projects_array.stdout
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+    - name: Get the existing inventories
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        inventory list -f human
+      changed_when: False
+      register: inventory_array
+      tags: TC_003
+
+    - name: Check for inventories
+      assert:
+        that: omnia_inventory_name in inventory_array.stdout
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+    - name: Get the existing groups if omnia-inventory exists
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        groups list --inventory "{{ omnia_inventory_name }}" -f human
+      changed_when: False
+      register: groups_array
+      when: omnia_inventory_name in inventory_array.stdout
+      tags: TC_003
+
+    - name: Check for manager and compute groups
+      assert:
+        that:
+          - manager_group_name in groups_array.stdout
+          - compute_group_name in groups_array.stdout
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+    - name: Get the existing credentials
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        credentials list -f human
+      changed_when: False
+      register: credentials_array
+      tags: TC_003
+
+    - name: Check for "{{ credential_name }}"
+      assert:
+        that: credential_name in credentials_array.stdout
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+    - name: Get the existing job templates
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        job_templates list -f human
+      changed_when: False
+      register: templates_array
+      tags: TC_003
+
+    - name: Check for templates
+      assert:
+        that:
+          - omnia_template_name in templates_array.stdout
+          - inventory_template_name in templates_array.stdout
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+    - name: Get the existing schedules for job templates
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        schedules list -f human
+      changed_when: False
+      register: schedules_array
+      tags: TC_003
+
+    - name: Check for schedules to job template
+      assert:
+        that: schedule_name in schedules_array.stdout
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+# Testcase OMNIA_CRM_US_AWXD_TC_004
+# Execute common role in management station without internet connectivity
+- name: OMNIA_CRM_US_AWXD_TC_004
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_common_vars.yml
+    - ../roles/common/vars/main.yml
+  tasks:
+    - name: Down internet connectivity
+      lineinfile:
+        path: /etc/hosts
+        line: "172.16.0.5 github.com"
+        state: present
+        backup: yes
+      tags: TC_004
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Calling the web_ui role to be tested
+          include_role:
+            name: ../roles/web_ui
+
+      rescue:
+        - name: Validate internet connectivity failure message
+          assert:
+            that: internet_status in internet_value.msg
+            success_msg: "{{ internet_check_success_msg }}"
+            fail_msg: "{{ internet_check_fail_msg }}"
+      tags: TC_004
+
+    - name: Up internet connectivity
+      lineinfile:
+        path: /etc/hosts
+        line: "172.16.0.5 github.com"
+        state: absent
+      tags: TC_004
+
+# Testcase OMNIA_CRM_US_AWXD_TC_005
+# Execute web_ui role in management station and reboot the server
+- name: OMNIA_CRM_US_AWXD_TC_005
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_web_ui_vars.yml
+  tasks:
+    - name: Get last uptime of the server
+      command: uptime -s
+      register: uptime_status
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_005
+
+    - name: Get current date
+      command: date +"%Y-%m-%d %H"
+      register: current_time
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_005
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Calling the web_ui role to be tested
+          include_role:
+            name: ../roles/web_ui
+      tags: TC_005
+
+    - name: Reboot localhost
+      command: reboot
+      when: current_time.stdout not in uptime_status.stdout
+      tags: TC_005
+
+    - name: Inspect AWX web container
+      docker_container_info:
+        name: "{{ docker_container_name }}"
+      register: awx_container_status
+      tags: TC_005
+
+    - name: Verify AWX container is running after reboot
+      assert:
+        that:
+          - "'running' in awx_container_status.container.State.Status"

+ 0 - 212
appliance/test/test_web_ui_awxc.yml

@@ -1,212 +0,0 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
----
-
-# Testcase OMNIA_CRM_US_AWXD_TC_008
-# Test case to validate the AWX configuration
-- name: OMNIA_CRM_US_AWXD_TC_008
-  hosts: localhost
-  connection: local
-  gather_subset:
-    - 'min'
-  vars_files:
-    - ../roles/web_ui/vars/main.yml
-    - ../roles/common/vars/main.yml
-    - test_vars/test_web_ui_vars.yml
-  tasks:
-
-    - block:
-        - name: Call the role to be tested
-          include_role:
-            name: ../roles/web_ui
-      tags: TC_008
-
-    - name: Get the package facts
-      package_facts:
-        manager: auto
-      tags: TC_008
-
-    - name: Check if ansible-tower-cli is already installed
-      assert:
-        that: "'{{ tower_cli_package_name }}' in ansible_facts.packages"
-        success_msg: "{{ resource_exists_success_msg }}"
-        fail_msg: "{{ resource_exists_fail_msg }}"
-      tags: TC_008
-
-    - name: Get the existing organizations
-      command: >-
-        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}" organizations list -f human
-      register: organizations_array
-      changed_when: False
-      tags: TC_008
-
-    - name: Check for organization
-      assert:
-        that: organization_name in organizations_array.stdout
-        success_msg: "{{ resource_exists_success_msg }}"
-        fail_msg: "{{ resource_exists_fail_msg }}"
-      tags: TC_008
-
-    - name: Get the existing projects
-      command: >-
-        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}" projects list -f human
-      changed_when: False
-      register: projects_array
-      tags: TC_008
-
-    - name: Check for project
-      assert:
-        that: project_name in projects_array.stdout
-        success_msg: "{{ resource_exists_success_msg }}"
-        fail_msg: "{{ resource_exists_fail_msg }}"
-      tags: TC_008
-
-    - name: Get the existing inventories
-      command: >-
-        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}" inventory list -f human
-      changed_when: False
-      register: inventory_array
-      tags: TC_008
-
-    - name: Check for inventories
-      assert:
-        that: omnia_inventory_name in inventory_array.stdout
-        success_msg: "{{ resource_exists_success_msg }}"
-        fail_msg: "{{ resource_exists_fail_msg }}"
-      tags: TC_008
-
-    - name: Get the existing groups if omnia-inventory exists
-      command: >-
-        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
-        groups list --inventory "{{ omnia_inventory_name }}" -f human
-      changed_when: False
-      register: groups_array
-      when: omnia_inventory_name in inventory_array.stdout
-      tags: TC_008
-
-    - name: Check for manager and compute groups
-      assert:
-        that: manager_group_name and compute_group_name in groups_array.stdout
-        success_msg: "{{ resource_exists_success_msg }}"
-        fail_msg: "{{ resource_exists_fail_msg }}"
-      tags: TC_008
-
-    - name: Get the existing job templates
-      command: >-
-        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}" job_templates list -f human
-      changed_when: False
-      register: templates_array
-      tags: TC_008
-
-    - name: Check for templates
-      assert:
-        that: omnia_template_name and inventory_template_name in templates_array.stdout
-        success_msg: "{{ resource_exists_success_msg }}"
-        fail_msg: "{{ resource_exists_fail_msg }}"
-      tags: TC_008
-
-    - name: Get the existing schedules for job templates
-      command: >-
-        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}" schedules list -f human
-      changed_when: False
-      register: schedules_array
-      tags: TC_008
-
-    - name: Check for schedules to job template
-      assert:
-        that: schedule_name in schedules_array.stdout
-        success_msg: "{{ resource_exists_success_msg }}"
-        fail_msg: "{{ resource_exists_fail_msg }}"
-      tags: TC_008
-
-# Testcase OMNIA_CRM_US_AWXD_TC_009
-# Execute common role in management station without internet connectivity
-- name: OMNIA_CRM_US_AWXD_TC_009
-  hosts: localhost
-  connection: local
-  gather_subset:
-    - 'min'
-  vars_files:
-    - test_vars/test_common_vars.yml
-    - ../roles/common/vars/main.yml
-  tasks:
-    - name: Down internet connectivity
-      lineinfile:
-        path: /etc/hosts
-        line: "172.16.0.5 github.com"
-        state: present
-        backup: yes
-      tags: TC_009
-
-    - block:
-        - name: Call common role
-          include_role:
-            name: ../roles/web_ui
-
-      rescue:
-        - name: Validate internet connectivity failure message
-          assert:
-            that: internet_status in internet_value.msg
-            success_msg: "{{ internet_check_success_msg }}"
-            fail_msg: "{{ internet_check_fail_msg }}"
-      tags: TC_009
-
-    - name: Up internet connectivity
-      lineinfile:
-        path: /etc/hosts
-        line: "172.16.0.5 github.com"
-        state: absent
-      tags: TC_009
-
-# Testcase OMNIA_CRM_US_AWXD_TC_010
-# Execute provision role in management station and reboot management station
-- name: OMNIA_CRM_US_AWXD_TC_010
-  hosts: localhost
-  connection: local
-  vars_files:
-    - test_vars/test_web_ui_vars.yml
-  tasks:
-    - name: Check last uptime of the server
-      shell: |
-        current_time=$(date +"%Y-%m-%d %H")
-        uptime -s | grep "$current_time"
-      register: uptime_status
-      changed_when: false
-      ignore_errors: yes
-      tags: TC_010
-
-    - block:
-        - name: Call web_ui role
-          include_role:
-            name: ../roles/web_ui
-          vars:
-            no_prompt: true
-      tags: TC_010
-
-    - name: Reboot localhost
-      command: reboot
-      when: uptime_status.stdout|length < 1
-      tags: TC_010
-
-    - name: Inspect AWX web container
-      docker_container_info:
-        name: "{{ docker_container_name }}"
-      register: awx_container_status
-      tags: TC_010
-
-    - name: Verify AWX container is running after reboot
-      assert:
-        that:
-          - "'running' in awx_container_status.container.State.Status"
-      tags: TC_010

+ 40 - 0
appliance/tools/passwordless_ssh.yml

@@ -0,0 +1,40 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Fetch provision_password
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  roles:
+    - fetch_password
+
+- name: Prepare the cluster with passwordless ssh from manager to compute
+  hosts: manager
+  gather_facts: false
+  pre_tasks:
+    - name: Set Fact
+      set_fact:
+        ssh_to: "{{ groups['compute'] }}"
+  roles:
+    - cluster_preperation
+
+- name: Prepare the cluster with passwordless ssh from compute to manager
+  hosts: compute
+  gather_facts: false
+  pre_tasks:
+    - name: Set Fact
+      set_fact:
+        ssh_to: "{{ groups['manager'] }}"
+  roles:
+    - cluster_preperation

+ 25 - 0
appliance/tools/provision_host_report.j2

@@ -0,0 +1,25 @@
+HPC Cluster
+-----------
+Reachable Hosts:
+{% if reachable_host_number > 0 %}
+{% for host in groups['reachable'] %}
+{% if reachable_host_number == 1 %}
+  inet={{ host }}, link/ether={{ ethernet_detail_reachable.stdout | replace(';','')}}
+{% elif reachable_host_number > 1 %}
+  inet={{ host }}, link/ether={{ ethernet_detail_reachable.results[loop.index|int - 1].stdout | replace(';','')}}
+{% endif %}
+{% endfor %}
+{% endif %}
+Total reachable hosts: {{ reachable_host_number }}
+
+Unreachable Hosts:
+{% if unreachable_host_number > 0 %}
+{% for host in groups['ungrouped'] %}
+{% if unreachable_host_number == 1 %}
+  inet={{ host }}, link/ether={{ ethernet_detail_unreachable.stdout | replace(';','')}}
+{% elif unreachable_host_number > 1 %}
+  inet={{ host }}, link/ether={{ ethernet_detail_unreachable.results[loop.index|int - 1].stdout | replace(';','')}}
+{% endif %}
+{% endfor %}
+{% endif %}
+Total unreachable hosts: {{ unreachable_host_number }}

+ 83 - 0
appliance/tools/provision_report.yml

@@ -0,0 +1,83 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+# This file is used to generate a report of reachable and unreachable hosts of the HPC cluster
+# It can be executed only if provisioned_hosts.yml has been created at omnia/appliance/roles/inventory/files/provisioned_hosts.yml
+
+# Command to execute: ansible-playbook provision_report.yml -i ../roles/inventory/files/provisioned_hosts.yml
+
+- name: Find reachable hosts
+  hosts: all
+  gather_facts: false
+  ignore_unreachable: true
+  ignore_errors: true
+  tasks:
+    - name: Check for reachable nodes
+      command: ping -c1 {{ inventory_hostname }}
+      delegate_to: localhost
+      register: ping_result
+      ignore_errors: yes
+      changed_when: false
+
+    - name: Group reachable hosts
+      group_by:
+        key: "reachable"
+      when: "'100% packet loss' not in ping_result.stdout"
+
+- name: Display hosts list
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+    - name: Set reachable and unreachable host number
+      set_fact:
+        reachable_host_number: "{{ groups['reachable'] | length}}"
+        unreachable_host_number: "{{ groups['ungrouped'] | length}}"
+
+    - name: Copy dhcpd.leases from cobbler
+      command: docker cp cobbler:/var/lib/dhcpd/dhcpd.leases dhcpd.leases
+      changed_when: true
+
+    - name: Fetch ethernet details of unreachable hosts
+      shell: sed -n '/{{ item }}/,/ethernet/p' dhcpd.leases | grep "ethernet" | awk '{ print $3 }' | uniq
+      register: ethernet_detail_unreachable
+      changed_when: false
+      args:
+        warn: no
+      with_items:
+        - "{{ groups['ungrouped'] }}"
+
+    - name: Fetch ethernet details of reachable hosts
+      shell: sed -n '/{{ item }}/,/ethernet/p' dhcpd.leases | grep "ethernet" | awk '{ print $3 }' | uniq
+      register: ethernet_detail_reachable
+      changed_when: false
+      args:
+        warn: no
+      with_items:
+        - "{{ groups['reachable'] }}"
+
+    - name: Copy host information to file
+      template:
+        src: provision_host_report.j2
+        dest: provision_host_report.txt
+
+    - name: Read provision host report
+      command: cat provision_host_report.txt
+      register: host_report
+      changed_when: false
+
+    - name: Display provision host report
+      debug:
+        var: host_report.stdout_lines

+ 36 - 0
appliance/tools/roles/cluster_preperation/tasks/main.yml

@@ -0,0 +1,36 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+- name: Set Facts
+  set_fact:
+    ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+
+- name: Disable host key checking
+  replace:
+    path: /etc/ssh/ssh_config
+    regexp: '#   StrictHostKeyChecking ask'
+    replace: 'StrictHostKeyChecking no'
+
+- name: Install sshpass
+  package:
+    name: sshpass
+    state: present
+
+- name: Verify and set passwordless ssh from manager to compute nodes
+  block:
+    - name: Execute on individual hosts
+      include_tasks: passwordless_ssh.yml
+      with_items: "{{ ssh_to }}"
+      loop_control:
+        pause: 5

+ 78 - 0
appliance/tools/roles/cluster_preperation/tasks/passwordless_ssh.yml

@@ -0,0 +1,78 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Initialize variables
+  set_fact:
+    ssh_status: false
+    current_host: "{{ item }}"
+
+- name: Verify whether passwordless ssh is set on the remote host
+  command: ssh -o PasswordAuthentication=no root@"{{ current_host }}" 'hostname'
+  register: ssh_output
+  ignore_errors: yes
+  changed_when: False
+
+- name: Update ssh connection status
+  set_fact:
+    ssh_status: true
+  when: "'Permission denied' not in ssh_output.stderr"
+
+- name: Verify the public key file existence
+  stat:
+    path: "{{ rsa_id_file }}"
+  register: verify_rsa_id_file
+  when: not ssh_status
+
+- name: Generate ssh key pair
+  command: ssh-keygen -t rsa -b 4096 -f "{{ rsa_id_file }}" -q -N "{{ passphrase }}"
+  when:
+    - not ssh_status
+    - not verify_rsa_id_file.stat.exists
+
+- name: Add the key identity
+  shell: |
+    eval `ssh-agent -s`
+    ssh-add "{{ rsa_id_file }}"
+  when: not ssh_status
+
+- name: Post public key
+  block:
+    - name: Create .ssh directory
+      command: >-
+        sshpass -p "{{ hostvars['127.0.0.1']['cobbler_password'] }}"
+        ssh root@"{{ current_host }}" mkdir -p /root/.ssh
+      when: not ssh_status
+      no_log: True
+      register: register_error
+
+    - name: Copy the public key to remote host
+      shell: >-
+        set -o pipefail && cat "{{ rsa_id_file }}".pub
+        | sshpass -p "{{ hostvars['127.0.0.1']['cobbler_password'] }}"
+        ssh root@"{{ current_host }}" 'cat >> "{{ auth_key_path }}"'
+      when: not ssh_status
+      no_log: True
+      register: register_error
+
+    - name: Change permissions on the remote host
+      shell: sshpass -p "{{ hostvars['127.0.0.1']['cobbler_password'] }}" ssh root@"{{ current_host }}" 'chmod 700 .ssh; chmod 640 "{{ auth_key_path }}"'
+      when: not ssh_status
+      no_log: True
+      register: register_error
+
+  rescue:
+    - name: Passwordless ssh failed
+      fail:
+        msg: "{{ register_error.stderr | regex_replace(hostvars['127.0.0.1']['cobbler_password']) | regex_replace(auth_key_path) }}"

+ 19 - 0
appliance/tools/roles/cluster_preperation/vars/main.yml

@@ -0,0 +1,19 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Usage: passwordless_ssh.yml
+rsa_id_file: "/root/.ssh/id_rsa"
+passphrase: ""
+auth_key_path: "/root/.ssh/authorized_keys"

+ 42 - 0
appliance/tools/roles/fetch_password/tasks/main.yml

@@ -0,0 +1,42 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+- name: Include variables from common role
+  include_vars: "{{ role_path }}/../../../roles/common/vars/main.yml"
+  no_log: True
+
+- name: Check input config file is encrypted
+  command: cat {{ role_path }}/../../../{{ input_config_filename }}
+  changed_when: false
+  register: config_content
+
+- name: Decrypt appliance_config.yml
+  command: >-
+    ansible-vault decrypt {{ role_path }}/../../../{{ input_config_filename }}
+    --vault-password-file {{ role_path }}/../../../{{ vault_filename }}
+  changed_when: false
+  when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+
+- name: Include variable file appliance_config.yml
+  include_vars: "{{ role_path }}/../../../{{ input_config_filename }}"
+
+- name: Save input variables from file
+  set_fact:
+    cobbler_password: "{{ provision_password }}"
+
+- name: Encrypt input config file
+  command: >-
+    ansible-vault encrypt {{ role_path }}/../../../{{ input_config_filename }}
+    --vault-password-file {{ role_path }}/../../../{{ vault_filename }}
+  changed_when: false

+ 97 - 0
docs/INSTALL_OMNIA.md

@@ -0,0 +1,97 @@
+# Install Omnia
+
+The following sections provide details on installing Omnia using the CLI. If you want to install the Omnia appliance and manage workloads using the appliance, see the [INSTALL_OMNIA_APPLIANCE](INSTALL_OMNIA_APPLIANCE.md) and [MONITOR_CLUSTERS](MONITOR_CLUSTERS.md) files for more information.
+
+## Prerequisites to install Omnia using the CLI
+Ensure that all the prerequisites listed in the [PREINSTALL_OMNIA](PREINSTALL_OMNIA.md) file are met before installing Omnia.
+
+## Steps to install Omnia using CLI
+__Note:__ If there are errors when any of the following Ansible playbook commands are run, re-run the commands.  
+__Note:__ The user should have root privileges to perform installations and configurations.
+
+1. Clone the Omnia repository.
+``` 
+$ git clone https://github.com/dellhpc/omnia.git 
+```
+__Note:__ After the Omnia repository is cloned, a folder named __omnia__ is created. It is recommended that you do not rename this folder.
+
+2. Change the directory to __omnia__: `cd omnia`
+
+3. An inventory file must be created in the __omnia__ folder. Add the compute node IPs under the **[compute]** group and the manager node IP under the **[manager]** group. See the INVENTORY template file under the `omnia/docs` folder.
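+
+A minimal sketch of such an inventory file, assuming one manager node and two compute nodes (the IP addresses are illustrative only and must be replaced with the addresses of your nodes):
+```
+[manager]
+172.17.0.100
+
+[compute]
+172.17.0.10
+172.17.0.11
+```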
+
+4. To install Omnia, run the following command.
+```
+ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2" 
+```
+
+5. By default, no skip tags are selected and both Kubernetes and Slurm will be deployed.  
+To skip the installation of Kubernetes, enter:  
+`ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2"  --skip-tags "kubernetes"`  
+Similarly, to skip Slurm, enter:  
+`ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2"  --skip-tags "slurm"`  
+__Note:__ If you would like to skip the NFS client setup, enter the following command to skip the k8s_nfs_client_setup role of Kubernetes:  
+`ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2"  --skip-tags "nfs_client"`
+
+6. To provide the password for the MariaDB database used for Slurm accounting and to select the Kubernetes CNI, edit the `omnia_config.yml` file.  
+__Note:__ Supported Kubernetes CNIs: Calico and Flannel. The default CNI is Calico.  
+To view the set passwords of omnia_config.yml at a later time, run the following command:  
+`ansible-vault view omnia_config.yml --vault-password-file .omnia_vault_key`
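+
+If the file has already been encrypted by a previous run, it can also be modified in place with ansible-vault; a sketch, using the same vault key file named above:
+```
+ansible-vault edit omnia_config.yml --vault-password-file .omnia_vault_key
+```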
+
+Omnia considers `slurm` as the default username for MariaDB.  
+
+The following __Kubernetes__ roles are provided by Omnia when the __omnia.yml__ file is run:
+- __common__ role:
+	- Install common packages on manager and compute nodes
+	- Docker is installed
+	- Deploy ntp/chrony for time synchronization
+	- Install Nvidia drivers and software components
+- **k8s_common** role: 
+	- Required Kubernetes packages are installed
+	- Starts the docker and Kubernetes services.
+- **k8s_manager** role: 
+	- __helm__ package for Kubernetes is installed.
+- **k8s_firewalld** role: This role is used to enable the required ports to be used by Kubernetes. 
+	- For __head-node-ports__: 6443, 2379-2380,10251,10252
+	- For __compute-node-ports__: 10250,30000-32767
+	- For __calico-udp-ports__: 4789
+	- For __calico-tcp-ports__: 5473,179
+	- For __flanel-udp-ports__: 8285,8472
+- **k8s_nfs_server_setup** role: 
+	- An __nfs-share__ directory, `/home/k8snfs`, is created. Compute nodes use this directory to share common files.
+- **k8s_nfs_client_setup** role
+- **k8s_start_manager** role: 
+	- Runs the __/bin/kubeadm init__ command to initialize the Kubernetes services on the manager node.
+	- Creates a service account for the Kubernetes Dashboard.
+- **k8s_start_workers** role: 
+	- The compute nodes are initialized and joined to the Kubernetes cluster with the manager node. 
+- **k8s_start_services** role
+	- Deploys Kubernetes services such as the Kubernetes Dashboard, Prometheus, MetalLB, and the NFS client provisioner.
+
+__Note:__ After Kubernetes is installed and configured, a few Kubernetes and Calico/Flannel-related ports are opened on the manager and compute nodes. This is required for Kubernetes Pod-to-Pod and Pod-to-Service communications. Calico/Flannel provides a full networking stack for Kubernetes pods.
+
+The following __Slurm__ roles are provided by Omnia when the __omnia.yml__ file is run:
+- **slurm_common** role:
+	- Installs the common packages on the manager and compute nodes.
+- **slurm_manager** role:
+	- Installs the packages required only on the manager node.
+	- This role also enables the required ports to be used by Slurm.  
+	    **tcp_ports**: 6817,6818,6819  
+		**udp_ports**: 6817,6818,6819
+	- Creates and updates the Slurm configuration files based on the manager node requirements.
+- **slurm_workers** role:
+	- Installs the Slurm packages on all compute nodes as per the compute node requirements.
+- **slurm_start_services** role: 
+	- Starts the Slurm services so that the compute nodes can communicate with the manager node.
+- **slurm_exporter** role: 
+	- Slurm exporter is a package for exporting metrics collected from the Slurm resource scheduling system to Prometheus.
+	- Slurm exporter is installed on the same host as Slurm, and it will install successfully only if Slurm is already installed.
+
+**Note:** If you want to install both JupyterHub and Kubeflow, install the JupyterHub playbook first and then the Kubeflow playbook.
+
+Commands to install JupyterHub and Kubeflow:
+* `ansible-playbook platforms/jupyterhub.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2"`
+* `ansible-playbook platforms/kubeflow.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2" `
+
+## Adding a new compute node to the cluster
+
+Update the INVENTORY file in the `omnia` directory by adding the new node's IP address (or hostname) to the compute group, as shown in the example below. Then run `omnia.yml` to add the new node to the cluster and update the configurations on the manager node.
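+
+For example, following the format of the INVENTORY file shipped in `docs/INVENTORY` (compute-03 is a placeholder for the new node):
+```
+[compute]
+compute-01
+compute-02
+# newly added compute node
+compute-03
+
+[manager]
+manager-01
+```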

+ 174 - 0
docs/INSTALL_OMNIA_APPLIANCE.md

@@ -0,0 +1,174 @@
+# Install the Omnia appliance
+
+## Prerequisites
+Ensure that all the prerequisites listed in the [PREINSTALL_OMNIA_APPLIANCE](PREINSTALL_OMNIA_APPLIANCE.md) file are met before installing the Omnia appliance.
+
+__Note:__ Changing the manager node after the installation of Omnia is not supported by Omnia. If you want to change the manager node, you must redeploy the entire cluster.  
+__Note:__ The user should have root privileges to perform installations and configurations.
+
+## Steps to install the Omnia appliance
+__Note:__ If there are errors when any of the following Ansible playbook commands are run, re-run the command.
+1. On the management node, change the working directory to the directory where you want to clone the Omnia Git repository.
+2. Clone the Omnia repository.
+``` 
+$ git clone https://github.com/dellhpc/omnia.git 
+```
+__Note:__ After the Omnia repository is cloned, a folder named __omnia__ is created. It is recommended that you do not rename this folder.
+
+3. Change the directory to `omnia/appliance`
+4. To provide passwords for Cobbler and AWX, edit the `appliance_config.yml` file.
+* To provide a mapping file for DHCP configuration, set the variable named **mapping_file_exits** in the **appliance_config.yml** file to __true__; otherwise, set it to __false__.
+
+Omnia considers the following usernames as default:  
+* `cobbler` for Cobbler Server
+* `admin` for AWX
+* `slurm` for MariaDB
+
+**Note**: 
+* The password must be a minimum of eight characters and a maximum of 30 characters.
+* Do not use these characters while entering a password: -, \\, "", and \'
+
+5. Using the `appliance_config.yml` file, you can change the NIC for the DHCP server under **hpc_nic** and the NIC used to connect to the Internet under **public_nic**. The default values of **hpc_nic** and **public_nic** are em1 and em2, respectively.
+6. The valid DHCP range for the HPC cluster is set using the __Dhcp_start_ip_range__ and __Dhcp_end_ip_range__ variables in the `appliance_config.yml` file.
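+
+As a rough sketch of steps 4-6, only the variables named in this guide are shown below; keep the exact key names, quoting, and the Cobbler/AWX password fields as they appear in the shipped `appliance_config.yml`, and treat the values as placeholders:
+```
+mapping_file_exits: "false"          # set to "true" if you provide mapping_file.csv
+hpc_nic: "em1"                       # NIC connected to the HPC (DHCP) network
+public_nic: "em2"                    # NIC connected to the Internet
+Dhcp_start_ip_range: "172.17.0.10"   # placeholder start of the DHCP range
+Dhcp_end_ip_range: "172.17.0.100"    # placeholder end of the DHCP range
+```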
+7. To provide the password for the MariaDB database (used for Slurm accounting) and to select the Kubernetes CNI, edit the `omnia_config.yml` file.
+
+__Note:__ Supported Kubernetes CNIs: calico and flannel. The default CNI is calico.
+
+To view the passwords set in `appliance_config.yml`, run the following command from the `omnia/appliance` directory:
+```
+ansible-vault view appliance_config.yml --vault-password-file .vault_key
+```
+
+To view the passwords set in `omnia_config.yml`, run the following command:
+```
+ansible-vault view omnia_config.yml --vault-password-file .omnia_vault_key
+```
+
+8. To install Omnia, run the following command:
+```
+ansible-playbook appliance.yml -e "ansible_python_interpreter=/usr/bin/python2"
+```
+   
+Omnia creates a log file which is available at: `/var/log/omnia.log`.
+
+**Provision operating system on the target nodes**  
+Omnia role used: *provision*  
+Ports used by __Cobbler__:  
+* __TCP__ ports: 80,443,69
+* __UDP__ ports: 69,4011
+
+To create the Cobbler image, Omnia configures the following:
+* Firewall settings.
+* The Cobbler kickstart file, which enables UEFI PXE boot.
+
+To access the Cobbler dashboard, enter `https://<IP>/cobbler_web` where `<IP>` is the Global IP address of the management node. For example, enter
+`https://100.98.24.225/cobbler_web` to access the Cobbler dashboard.
+
+__Note__: After the Cobbler Server provisions the operating system on the nodes, IP addresses and host names are assigned by the DHCP service.  
+* If a mapping file is not provided, the hostname is assigned in the format **computexxx-xxx**, where "xxx-xxx" are the last two octets of the host IP address. For example, if the host IP address is 172.17.0.11, the hostname assigned by Omnia is compute0-11.  
+* If a mapping file is provided, the hostnames follow the format provided in the mapping file.
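+
+For reference, a mapping file follows the `MAC,Hostname,IP` format described in the preinstall guide; the entries below are placeholders:
+```
+xx:xx:4B:C4:xx:44,compute001,172.17.0.81
+xx:xx:4B:C5:xx:52,compute002,172.17.0.82
+```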
+
+**Install and configure Ansible AWX**  
+Omnia role used: *web_ui*  
+Port used by __AWX__ is __8081__.  
+AWX repository is cloned from the GitHub path: https://github.com/ansible/awx.git 
+
+Omnia performs the following configuration on AWX:
+* The default organization name is set to **Dell EMC**.
+* The default project name is set to **omnia**.
+* Credential: omnia_credential
+* Inventory: omnia_inventory with compute and manager groups
+* Template: DeployOmnia and Dynamic Inventory
+* Schedules: DynamicInventorySchedule, which runs every 10 minutes
+
+To access the AWX dashboard, enter `http://<IP>:8081` where **\<IP>** is the Global IP address of the management node. For example, enter `http://100.98.24.225:8081` to access the AWX dashboard.
+
+**Note**: The AWX configurations are performed automatically by Omnia. Dell Technologies recommends that you do not change the default configurations provided by Omnia, as the functionality may be impacted.
+
+__Note__: Although the AWX UI is accessible, hosts are shown only after a few nodes have been provisioned by Cobbler. It takes approximately 10 to 15 minutes for the host details to be displayed after provisioning. If a server is provisioned but you are unable to view its details on the AWX UI, you can run the **provision_report.yml** playbook from the __omnia__ -> __appliance__ -> __tools__ folder to view the hosts that are reachable.
+
+## Install Kubernetes and Slurm using AWX UI
+Kubernetes and Slurm are installed by deploying the **DeployOmnia** template on the AWX dashboard.
+
+1. On the AWX dashboard, under __RESOURCES__ __->__ __Inventories__, select __Groups__.
+2. Select either __compute__ or __manager__ group.
+3. Select the __Hosts__ tab.
+4. To add the hosts provisioned by Cobbler, select __Add__ __->__ __Add__ __existing__ __host__, and then select the hosts from the list and click __Save__.
+5. To deploy Omnia, under __RESOURCES__ -> __Templates__, select __DeployOmnia__ and click __LAUNCH__.
+6. By default, no skip tags are selected, and both Kubernetes and Slurm will be deployed. To install only Kubernetes, enter `slurm` and select **Create "slurm"**. Similarly, to install only Slurm, add the `kubernetes` skip tag. 
+
+__Note:__
+*	If you would like to skip the NFS client setup, enter `nfs_client` in the skip tag section to skip the **k8s_nfs_client_setup** role of Kubernetes.
+
+7. Click **Next**.
+8. Review the details in the **Preview** window, and click **Launch** to run the DeployOmnia template. 
+
+To establish the passwordless communication between compute nodes and manager node:
+1. In AWX UI, under __RESOURCES__ -> __Templates__, select __DeployOmnia__ template.
+2. From __Playbook dropdown__ menu, select __appliance/tools/passwordless_ssh.yml__ and launch the template.
+
+__Note:__ If you want to install both __JupyterHub__ and __Kubeflow__, install the __JupyterHub__ playbook first and then the __Kubeflow__ playbook.
+
+__Note:__ To install __JupyterHub__ and __Kubeflow__ playbooks:
+*	From __AWX UI__, under __RESOURCES__ -> __Templates__, select __DeployOmnia__ template.
+*	From __Playbook dropdown__ menu, select __platforms/jupyterhub.yml__ option and launch the template to install JupyterHub playbook.
+*	From __Playbook dropdown__ menu, select __platforms/kubeflow.yml__ option and launch the template to install Kubeflow playbook.
+
+
+The DeployOmnia template may not run successfully if:
+- The Manager group contains more than one host.
+- The Compute group does not contain a host. Ensure that the Compute group is assigned at least one host node.
+- Both the kubernetes and slurm tags are selected under Skip Tags.
+
+After the **DeployOmnia** template is run from the AWX UI, the **omnia.yml** file installs Kubernetes and Slurm, or either of them, as per the selection made in the template. Additionally, appropriate roles are assigned to the compute and manager groups.
+
+The following __kubernetes__ roles are provided by Omnia when the __omnia.yml__ file is run:
+- __common__ role:
+	- Installs common packages on the manager and compute nodes
+	- Installs Docker
+	- Deploys the NTP/chrony time service
+	- Installs NVIDIA drivers and software components
+- **k8s_common** role: 
+	- Installs the required Kubernetes packages
+	- Starts the Docker and Kubernetes services
+- **k8s_manager** role: 
+	- Installs the __helm__ package for Kubernetes
+- **k8s_firewalld** role: This role enables the ports required by Kubernetes. 
+	- For __head-node-ports__: 6443,2379-2380,10251,10252
+	- For __compute-node-ports__: 10250,30000-32767
+	- For __calico-udp-ports__: 4789
+	- For __calico-tcp-ports__: 5473,179
+	- For __flannel-udp-ports__: 8285,8472
+- **k8s_nfs_server_setup** role: 
+	- Creates an NFS share directory, `/home/k8snfs`, on the manager node so that the compute nodes can share common files.
+- **k8s_nfs_client_setup** role:
+	- Mounts the NFS share created by the **k8s_nfs_server_setup** role on the compute nodes.
+- **k8s_start_manager** role: 
+	- Runs the __/bin/kubeadm init__ command to initialize the Kubernetes services on the manager node.
+	- Creates the service account for the Kubernetes Dashboard.
+- **k8s_start_workers** role: 
+	- Initializes the compute nodes and joins them to the Kubernetes cluster through the manager node. 
+- **k8s_start_services** role:
+	- Deploys Kubernetes services such as the Kubernetes Dashboard, Prometheus, MetalLB, and the NFS client provisioner.
+
+__Note:__ After Kubernetes is installed and configured, a few Kubernetes and calico/flannel related ports are opened on the manager and compute nodes. This is required for Kubernetes pod-to-pod and pod-to-service communications. Calico/flannel provides a full networking stack for Kubernetes pods.
+
+The following __Slurm__ roles are provided by Omnia when the __omnia.yml__ file is run:
+- **slurm_common** role:
+	- Installs the common packages on the manager and compute nodes.
+- **slurm_manager** role:
+	- Installs the packages required only on the manager node.
+	- Enables the ports required by Slurm (a minimal example appears after this list):  
+	    **tcp_ports**: 6817,6818,6819  
+	    **udp_ports**: 6817,6818,6819
+	- Creates and updates the Slurm configuration files based on the manager node requirements.
+- **slurm_workers** role:
+	- Installs the Slurm packages on all compute nodes as per the compute node requirements.
+- **slurm_start_services** role: 
+	- Starts the Slurm services so that the compute nodes can communicate with the manager node.
+- **slurm_exporter** role: 
+	- Slurm Exporter exports metrics collected from the Slurm resource scheduling system to Prometheus.
+	- Slurm Exporter is installed on the same host as Slurm, and it installs successfully only if Slurm is already installed.
+
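+As a minimal sketch of how the Slurm ports listed above could be opened with firewalld on the manager node (the actual slurm_manager role may structure these tasks differently):
+```
+# Sketch only: open the Slurm TCP and UDP ports on the manager node.
+- name: Open Slurm TCP ports on the firewall
+  firewalld:
+    port: "{{ item }}/tcp"
+    permanent: yes
+    state: enabled
+  with_items:
+    - 6817
+    - 6818
+    - 6819
+
+- name: Open Slurm UDP ports on the firewall
+  firewalld:
+    port: "{{ item }}/udp"
+    permanent: yes
+    state: enabled
+  with_items:
+    - 6817
+    - 6818
+    - 6819
+```
+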
+## Adding a new compute node to the cluster
+
+If a new node is provisioned through Cobbler, the node address is automatically displayed on the AWX dashboard. The node is not assigned to any group. You can add the node to the compute group and run `omnia.yml` to add the new node to the cluster and update the configurations in the manager node.

+ 6 - 0
docs/INVENTORY

@@ -0,0 +1,6 @@
+[compute]
+compute-01
+compute-02
+
+[manager]
+manager-01

+ 81 - 0
docs/MONITOR_CLUSTERS.md

@@ -0,0 +1,81 @@
+# Monitor Kubernetes and Slurm
+Omnia provides playbooks to configure additional software components for Kubernetes such as JupyterHub and Kubeflow. For workload management (submitting, controlling, and managing jobs) of HPC, AI, and Data Analytics clusters, you can access the Kubernetes and Slurm dashboards and other supported applications. 
+
+__Note:__ To access the dashboards below, log in to the manager node and open the installed web browser.
+
+__Note:__ If you are connecting remotely, ensure that your SSH client (PuTTY or a similar client) supports X11 forwarding. If you are using MobaXterm version 8 or later, follow these steps:
+1. SSH to the manager node with X11 forwarding enabled:
+   `ssh -X root@<ip>` (where `<ip>` is the private IP address of the manager node)
+2. `yum install firefox -y`
+3. `yum install xorg-x11-xauth`
+4. `export DISPLAY=:10.0`
+5. Log out and log in again.
+6. To launch Firefox from the terminal, use the following command: 
+   `firefox&`
+
+__Note:__ Every time you log out, you must run the __export DISPLAY=:10.0__ command again.
+
+## Access Kubernetes Dashboard
+1. To verify if the __Kubernetes-dashboard service__ is __running__, run the following command:
+  `kubectl get pods --namespace kubernetes-dashboard`
+2. To start the Kubernetes dashboard, run the following command:
+  `kubectl proxy`
+3. From the CLI, run the following command to see the generated tokens: `kubectl get secrets`
+4. Copy the token with the name __prometheus-kube-state-metrics__ of the type __kubernetes.io/service-account-token__.
+5. Run the following command: `kubectl describe secret <copied token name>`
+6. Copy the encrypted token value.
+7. On a web browser (installed on the manager node), enter http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ to access the Kubernetes Dashboard.
+8. Select the authentication method as __Token__.
+9. On the Kubernetes Dashboard, paste the copied encrypted token and click __Sign in__.
+
+## Access Kubeflow Dashboard
+
+__Note:__ Use only a port number between __8000__ and __8999__.  
+__Note:__ The suggested port number is 8085.
+
+1. To see which ports are in use, run the following command:
+   `netstat -an`
+2. Choose a port number between __8000__ and __8999__ that is not in use.
+3. To run the __Kubeflow__ dashboard on the selected port number, run the following command:
+   `kubectl port-forward -n kubeflow service/centraldashboard <selected_port_number>:80`
+4. On a web browser installed on the __manager node__, go to `http://localhost:<selected_port_number>/` to launch the Kubeflow central navigation dashboard.
+
+## Access JupyterHub Dashboard
+If you have installed the JupyterHub application for Kubernetes, you can access the dashboard by following these actions:
+1. To verify if the JupyterHub services are running, run the following command: 
+   `kubectl get pods --namespace default`
+2. Ensure that the pod names starting with __hub__ and __proxy__ are in __running__ status.
+3. Run the following command:
+   `kubectl get services`
+4. Copy the **External IP** of __proxy-public__ service.
+5. On a web browser installed on the __manager node__, use the External IP address to access the JupyterHub Dashboard.
+6. Enter any __username__ and __password__ combination to log in to JupyterHub. The __username__ and __password__ can be configured later from the JupyterHub dashboard.
+
+## Prometheus
+
+* Prometheus is installed in one of two ways:
+  * Prometheus is installed on the host when Slurm is installed without Kubernetes.
+  * Prometheus is installed as a Kubernetes role when you install both Slurm and Kubernetes.
+
+If Prometheus is installed as part of the Kubernetes role, run the following commands before starting the Prometheus UI:
+1. `export POD_NAME=$(kubectl get pods --namespace default -l "app=prometheus,component=server" -o jsonpath="{.items[0].metadata.name}")`
+2. `echo $POD_NAME`
+3. `kubectl --namespace default port-forward $POD_NAME 9090`
+
+__Note:__ If Prometheus is installed on the host, start the Prometheus web server as follows:
+* Navigate to the Prometheus folder. The default path is __/var/lib/prometheus-2.23.0.linux-amd64/__.
+* Start the web server:
+  `./prometheus`
+
+Go to http://localhost:9090 to launch the Prometheus UI in the browser.

+ 28 - 0
docs/PREINSTALL_OMNIA.md

@@ -0,0 +1,28 @@
+# Pre-Installation Preparation
+
+## Assumptions
+Omnia assumes that prior to installation:
+* The manager and compute nodes are installed with the CentOS 7.9 2009 OS.
+* The network(s) have been cabled and the nodes can reach the Internet.
+* SSH keys for root have been installed on all nodes to allow password-less SSH.
+* On the manager node, install Ansible and Git using the following commands:
+	* `yum install epel-release -y`
+	* `yum install ansible git -y`  
+__Note:__ Ansible must be installed using __yum__. If Ansible is installed using __pip3__, re-install it using __yum__.
+
+
+## Example system designs
+Omnia can configure systems which use Ethernet- or Infiniband-based fabric to connect the compute servers.
+
+![Example system configuration with Ethernet fabric](images/example-system-ethernet.png)
+
+![Example system configuration with Infiniband fabric](images/example-system-infiniband.png)
+
+## Network Setup
+Omnia assumes that servers are already connected to the network and have access to the internet.
+### Network Topology
+Possible network configurations include:
+* A flat topology where all nodes are connected to a switch which includes an uplink to the internet. This requires multiple externally-facing IP addresses.
+* A hierarchical topology where compute nodes are connected to a common switch, but the manager node contains a second network connection which is connected to the internet. All outbound/inbound traffic would be routed through the manager node. This requires setting up firewall rules for IP masquerade; see [here](https://www.server-world.info/en/note?os=CentOS_7&p=firewalld&f=2) for an example and the minimal Ansible sketch below.
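+
+A minimal sketch (assuming firewalld manages the manager node's public zone) of enabling IP masquerade so that compute node traffic can be routed through the manager node:
+```
+# Sketch only: enable IP masquerade on the manager node's public zone.
+- name: Enable IP masquerade on the manager node
+  firewalld:
+    zone: public
+    masquerade: yes
+    permanent: yes
+    state: enabled
+```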
+### IP and Hostname Assignment
+The recommended setup is to assign IP addresses to individual servers. This can be done manually by logging onto each node, or via DHCP.

+ 37 - 0
docs/PREINSTALL_OMNIA_APPLIANCE.md

@@ -0,0 +1,37 @@
+# Prerequisites
+
+Ensure that the following prerequisites are met before installing Omnia:
+* On the management node, install Ansible and Git using the following commands:
+	* `yum install epel-release -y`
+	* `yum install ansible git -y`
+__Note:__ Ansible must be installed using __yum__. If Ansible is installed using __pip3__, re-install it using __yum__.
+* Ensure a stable Internet connection is available on management node and target nodes. 
+* CentOS 7.9 2009 is installed on the management node.
+* To provision the bare metal servers:
+	* Go to http://isoredirect.centos.org/centos/7/isos/x86_64/ and download the **CentOS-7-x86_64-Minimal-2009** ISO file to the following directory on the management node: `omnia/appliance/roles/provision/files`.
+	* Rename the downloaded ISO file to `CentOS-7-x86_64-Minimal-2009.iso`.
+* For DHCP configuration, you can provide a mapping file named mapping_file.csv under __omnia/appliance/roles/provision/files__. The details provided in the CSV file must be in the format: MAC, Hostname, IP. For example, `xx:xx:4B:C4:xx:44,validation01,172.17.0.81` and  `xx:xx:4B:C5:xx:52,validation02,172.17.0.82` are valid entries.
+__Note:__ Duplicate hostnames must not be provided in the mapping file and the hostname should not contain these characters: "_" and "."
+* Connect one of the Ethernet cards on the management node to the HPC switch and the other Ethernet card to the global network.
+* If SELinux is not disabled on the management node, disable it from `/etc/sysconfig/selinux` and restart the management node.
+* The default mode of PXE is __UEFI__ and the BIOS Legacy Mode is not supported.
+* The default boot order for the bare metal servers must be __PXE__.
+* Configuration of __RAID__ is not part of Omnia. If the bare metal servers have a __RAID__ controller installed, it is mandatory to create a **VIRTUAL DISK**.
+
+## Assumptions
+
+## Example system designs
+Omnia can configure systems which use Ethernet- or Infiniband-based fabric to connect the compute servers.
+
+![Example system configuration with Ethernet fabric](images/example-system-ethernet.png)
+
+![Example system configuration with Infiniband fabric](images/example-system-infiniband.png)
+
+## Network Setup
+Omnia assumes that servers are already connected to the network and have access to the internet.
+### Network Topology
+Possible network configurations include:
+* A flat topology where all nodes are connected to a switch which includes an uplink to the internet. This requires multiple externally-facing IP addresses
+* A hierarchical topology where compute nodes are connected to a common switch, but the manager node contains a second network connection which is connected to the internet. All outbound/inbound traffic would be routed through the manager node. This requires setting up firewall rules for IP masquerade, see [here](https://www.server-world.info/en/note?os=CentOS_7&p=firewalld&f=2) for an example.
+### IP and Hostname Assignment
+The recommended setup is to assign IP addresses to individual servers. This can be done manually by logging onto each node, or via DHCP.

The file diff is too large to display
+ 51 - 7
docs/README.md


+ 55 - 18
omnia.yml

@@ -13,7 +13,12 @@
 # limitations under the License.
 ---
 
-# Omnia playbook. Will be updated later.
+- name: Validate the cluster
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  roles:
+    - cluster_validation
 
 - name: Gather facts from all the nodes
   hosts: all
@@ -23,62 +28,87 @@
   gather_facts: false
   roles:
     - common
- 
-- name: Apply GPU node config
-  hosts: gpus
+  tags: common
+
+- name: Apply common K8s installation and config
+  hosts: manager, compute
   gather_facts: false
   roles:
-    - compute_gpu
+    - k8s_common
+  tags: kubernetes
 
 - name: Apply K8s manager config
   hosts: manager
   gather_facts: true
   roles:
-    - manager
+    - k8s_manager
+  tags: kubernetes
 
 - name: Apply K8s firewalld config on manager and compute nodes
   hosts: manager, compute
   gather_facts: false
   roles:
-    - firewalld
+    - k8s_firewalld
+  tags: kubernetes
+
+- name: Apply NFS server setup on manager node
+  hosts: manager
+  gather_facts: false
+  roles:
+    - k8s_nfs_server_setup
+  tags:
+    - kubernetes
+    - nfs
+
+- name: Apply NFS client setup on compute nodes
+  hosts: compute
+  gather_facts: false
+  roles:
+    - k8s_nfs_client_setup
+  tags:
+    - kubernetes
+    - nfs
 
 - name: Start K8s on manager server
   hosts: manager
   gather_facts: true
   roles:
-    - startmanager
+    - k8s_start_manager
+  tags: kubernetes
 
 - name: Start K8s worker servers on compute nodes
   hosts: compute
   gather_facts: false
   roles:
-    - startworkers
+    - k8s_start_workers
+  tags: kubernetes
 
 - name: Start K8s worker servers on manager nodes
   hosts: manager
   gather_facts: false
   roles:
-    - startservices
+    - k8s_start_services
+  tags: kubernetes
 
-- name: Apply SLURM manager config
-  hosts: manager
+- name: Apply common Slurm installation and config
+  hosts: manager, compute
   gather_facts: false
   roles:
-    - slurm_manager
+    - slurm_common
   tags: slurm
 
-- name: Apply common Slurm installation and config
-  hosts: manager, compute
+- name: Apply Slurm manager config
+  hosts: manager
   gather_facts: false
   roles:
-    - slurm_common
+    - slurm_manager
   tags: slurm
 
-- name: Start slurm workers
+- name: Start Slurm workers
   hosts: compute
   gather_facts: false
   roles:
-    - start_slurm_workers
+    - slurm_workers
   tags: slurm
 
 - name: Start Slurm services
@@ -87,3 +117,10 @@
   roles:
     - slurm_start_services
   tags: slurm
+
+- name: Install slurm exporter
+  hosts: manager
+  gather_facts: false
+  roles:
+    - slurm_exporter
+  tags: slurm

+ 24 - 0
omnia_config.yml

@@ -0,0 +1,24 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Password used for Slurm database.
+# The length of the password should be at least 8 characters.
+# The password must not contain -,\, ',"
+mariadb_password: "password"
+
+# Kubernetes SDN network.
+# It can either be "calico" or "flannel".
+# Default value assigned is "calico".
+k8s_cni: "calico"

+ 17 - 13
platforms/roles/kubeflow/tasks/main.yml

@@ -70,6 +70,22 @@
     regexp: 'memory: 40Mi'
     replace: 'memory: 256Mi'
 
+- name: Modify memory request for istio-egressgateway-service-account
+  replace:
+    path: "{{ istio_noauth_yaml_file_path }}"
+    after: 'serviceAccountName: istio-egressgateway-service-account'
+    before: '---'
+    regexp: 'memory: 128Mi'
+    replace: 'memory: 256Mi'
+
+- name: Modify memory request for istio-egressgateway-service-account
+  replace:
+    path: "{{ istio_noauth_yaml_file_path }}"
+    after: 'serviceAccountName: istio-egressgateway-service-account'
+    before: '---'
+    regexp: 'memory: 40Mi'
+    replace: 'memory: 128Mi'
+
 - name: Modify CPU limit for kfserving-gateway
   replace:
     path: "{{ kfserving_gateway_yaml_file_path }}"
@@ -114,20 +130,8 @@
     regexp: 'NodePort'
     replace: 'LoadBalancer'
 
-- name: Remove cert-manager application block
-  replace:
-    path: "{{ kubeflow_config_file }}"
-    regexp: "{{ cert_manager_block }}"
-    replace: "\n"
-
-- name: Remove seldon-core-operator application block
-  replace:
-    path: "{{ kubeflow_config_file }}"
-    regexp: "{{ seldon_core_operator_block }}"
-    replace: "\n"
-
 - name: Apply kubeflow configuration
   command:
     cmd: "/usr/bin/kfctl apply -V -f '{{ kubeflow_config_file }}'"
     chdir: "{{ omnia_kubeflow_dir_path }}"
-  changed_when: true
+  changed_when: true

+ 0 - 22
platforms/roles/kubeflow/vars/main.yml

@@ -32,25 +32,3 @@ kfserving_gateway_yaml_file_path: "{{ omnia_kubeflow_dir_path }}/kustomize/kfser
 argo_yaml_file_path: "{{ omnia_kubeflow_dir_path }}/kustomize/argo/base/service.yaml"
 
 kubeflow_config_file: "{{ omnia_kubeflow_dir_path }}/kfctl_k8s_istio.v1.0.2.yaml"
-
-cert_manager_block: >
-    - kustomizeConfig:
-          overlays:
-          - self-signed
-          - application
-          parameters:
-          - name: namespace
-            value: cert-manager
-          repoRef:
-            name: manifests
-            path: cert-manager/cert-manager
-        name: cert-manager
-
-seldon_core_operator_block: >
-    - kustomizeConfig:
-          overlays:
-          - application
-          repoRef:
-            name: manifests
-            path: seldon/seldon-core-operator
-        name: seldon-core-operator

+ 87 - 0
roles/cluster_validation/tasks/fetch_password.yml

@@ -0,0 +1,87 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+- name: Check if omnia_vault_key exists
+  stat:
+    path: "{{ role_path }}/../../{{ config_vaultname }}"
+  register: vault_key_result
+
+- name: Create ansible vault key if it does not exist
+  set_fact:
+    vault_key: "{{ lookup('password', '/dev/null chars=ascii_letters') }}"
+  when: not vault_key_result.stat.exists
+
+- name: Save vault key
+  copy:
+    dest: "{{ role_path }}/../../{{ config_vaultname }}"
+    content: |
+      {{ vault_key }}
+    owner: root
+    force: yes
+    mode: '0755'
+  when: not vault_key_result.stat.exists
+
+- name: Check if omnia config file is encrypted
+  command: cat {{ role_path }}/../../{{ config_filename }}
+  changed_when: false
+  register: config_content
+  no_log: True
+
+- name: Decrypt omnia_config.yml
+  command: >-
+    ansible-vault decrypt {{ role_path }}/../../{{ config_filename }}
+    --vault-password-file {{ role_path }}/../../{{ config_vaultname }}
+  when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+
+- name: Include variable file omnia_config.yml
+  include_vars: "{{ role_path }}/../../{{ config_filename }}"
+  no_log: True
+
+- name: Validate input parameters are not empty
+  fail:
+    msg: "{{ input_config_failure_msg }}"
+  register: input_config_check
+  when:
+    - mariadb_password | length < 1 or
+      k8s_cni | length < 1
+
+- name: Assert mariadb_password
+  assert:
+    that:
+        - mariadb_password | length > min_length | int - 1
+        - mariadb_password | length < max_length | int + 1
+        - '"-" not in mariadb_password '
+        - '"\\" not in mariadb_password '
+        - '"\"" not in mariadb_password '
+        - " \"'\" not in mariadb_password "
+    success_msg: "{{ success_msg_mariadb_password }}"
+    fail_msg: "{{ fail_msg_mariadb_password }}"
+
+- name: Assert kubernetes cni
+  assert:
+    that: "('calico' in k8s_cni) or ('flannel' in k8s_cni)"
+    success_msg: "{{ success_msg_k8s_cni }}"
+    fail_msg: "{{ fail_msg_k8s_cni }}"
+
+- name: Save input variables from file
+  set_fact:
+    db_password: "{{ mariadb_password }}"
+    k8s_cni: "{{ k8s_cni }}"
+  no_log: True
+
+- name: Encrypt input config file
+  command: >-
+    ansible-vault encrypt {{ role_path }}/../../{{ config_filename }}
+    --vault-password-file {{ role_path }}/../../{{ config_vaultname }}
+  changed_when: false

+ 19 - 0
roles/cluster_validation/tasks/main.yml

@@ -0,0 +1,19 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+- name: Perform validations
+  include_tasks: validations.yml
+
+- name: Fetch passwords
+  include_tasks: fetch_password.yml

+ 30 - 0
roles/cluster_validation/tasks/validations.yml

@@ -0,0 +1,30 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+- name: Validate skip tags
+  fail:
+    msg: "{{ skip_tag_fail_msg }}"
+  when: "'slurm' in ansible_skip_tags and 'kubernetes' in ansible_skip_tags"
+
+- name: Manager group to contain exactly 1 node
+  assert:
+    that: "groups['manager'] | length | int == 1"
+    fail_msg: "{{ manager_group_fail_msg }}"
+    success_msg: "{{ manager_group_success_msg }}"
+
+- name: Compute group to contain at least 1 node
+  assert:
+    that: "groups['compute'] | length | int >= 1"
+    fail_msg: "{{ compute_group_fail_msg }}"
+    success_msg: "{{ compute_group_success_msg }}"

+ 32 - 0
roles/cluster_validation/vars/main.yml

@@ -0,0 +1,32 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+#Usage: fetch_password.yml
+config_filename: "omnia_config.yml"
+config_vaultname: .omnia_vault_key
+min_length: 8
+max_length: 30
+fail_msg_mariadb_password: "mariadb_password is not given in the correct format."
+success_msg_mariadb_password: "mariadb_password validated"
+success_msg_k8s_cni: "Kubernetes CNI Validated"
+fail_msg_k8s_cni: "Kubernetes CNI not correct."
+
+#Usage: validations.yml
+skip_tag_fail_msg: "Can't skip both slurm and kubernetes"
+manager_group_fail_msg: "manager group should contain exactly 1 node"
+manager_group_success_msg: "manager group check passed"
+compute_group_fail_msg: "compute group should contain at least 1 node"
+compute_group_success_msg: "compute group check passed"
+disjoint_fail_msg: "manager and compute groups should be disjoint"
+disjoint_success_msg: "manager and compute groups are disjoint"

+ 1 - 1
roles/compute_gpu/files/daemon.json

@@ -6,4 +6,4 @@
     }
   },
   "default-runtime": "nvidia"
-}
+}

+ 20 - 0
roles/common/files/inventory.fact

@@ -0,0 +1,20 @@
+#!/bin/bash
+INVENTORY=$(mktemp lspci.XXXXXXXX)
+
+lspci > $INVENTORY
+
+NVIDIA_GPU=$(cat $INVENTORY | grep -i nvidia | wc -l)
+XILINX_FPGA=$(cat $INVENTORY | grep "Processing accelerators: Xilinx Corporation Device" | wc -l)
+INTEL_A10_FPGA=$(cat $INVENTORY | grep "Processing accelerators: Intel Corporation Device" | wc -l)
+AMD_GPU=$(cat $INVENTORY | grep "Display controller: Advanced Micro Devices, Inc. \[AMD/ATI\]" | wc -l)
+
+cat << EOF
+{
+	"xilinx_fpga" : $XILINX_FPGA,
+	"nvidia_gpu" : $NVIDIA_GPU,
+	"amd_gpu" : $AMD_GPU,
+	"intel_a10_fpga" : $INTEL_A10_FPGA
+}
+EOF
+
+rm -f $INVENTORY

+ 17 - 12
roles/common/handlers/main.yml

@@ -1,18 +1,23 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
 ---
 
-- name: Start and Enable docker service
-  service:
-    name: docker
-    state: restarted
-    enabled: yes
-  #tags: install
-
-- name: Start and Enable Kubernetes - kubelet
-  service:
-    name: kubelet
+- name: Restart ntpd
+  systemd:
+    name: ntpd
     state: started
     enabled: yes
-  #tags: install
 
 - name: Restart chrony
   service:
@@ -32,4 +37,4 @@
   register: chrony_src
   until:  chrony_src.stdout.find('^*') > -1
   retries: "{{ retry_count }}"
-  delay: "{{ delay_count }}"
+  delay: "{{ delay_count }}"

+ 35 - 0
roles/common/tasks/amd.yml

@@ -0,0 +1,35 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Add AMD ROCm repository
+  yum_repository:
+    name: ROCm
+    description: AMD GPU ROCm Repository
+    baseurl: https://repo.radeon.com/rocm/yum/rpm
+    gpgcheck: yes
+    gpgkey: https://repo.radeon.com/rocm/rocm.gpg.key
+    enabled: yes
+  tags: install
+
+- name: Install AMD ROCm drivers
+  package:
+    name: rocm-dkms
+    enablerepo: ROCm
+    state: present
+  tags: install
+
+- name: Reboot after installing GPU drivers
+  reboot:
+  tags: install

+ 21 - 45
roles/common/tasks/main.yml

@@ -13,14 +13,19 @@
 #  limitations under the License.
 ---
 
-- name: Add kubernetes repo
+- name: Create a custom fact directory on each host
+  file:
+    path: "{{ custom_fact_dir }}"
+    state: directory
+    mode: "{{ custom_fact_dir_mode }}"
+
+- name: Install accelerator discovery script
   copy:
-    src: kubernetes.repo
-    dest: "{{ k8s_repo_dest }}"
+    src: inventory.fact
+    dest: "{{ accelerator_discovery_script_dest }}"
     owner: root
     group: root
-    mode: "{{ k8s_repo_file_mode }}"
-  tags: install
+    mode: "{{ accelerator_discovery_script_mode }}"
 
 - name: Add elrepo GPG key
   rpm_key:
@@ -40,20 +45,6 @@
     dest: "{{ docker_repo_dest }}"
   tags: install
 
-- name: Update sysctl to handle incorrectly routed traffic when iptables is bypassed
-  copy:
-    src: k8s.conf
-    dest: "{{ k8s_conf_dest }}"
-    owner: root
-    group: root
-    mode: "{{ k8s_conf_file_mode }}"
-  tags: install
-
-- name: Update sysctl
-  command: /sbin/sysctl --system
-  changed_when: true
-  tags: install
-
 - name: Disable swap
   command: /sbin/swapoff -a
   changed_when: true
@@ -70,20 +61,8 @@
     state: present
   tags: install
 
-- name: Install k8s packages
-  package:
-    name: "{{ k8s_packages }}"
-    state: present
-  tags: install
-
-- name: Versionlock kubernetes
-  command: "yum versionlock '{{ item }}'"
-  args:
-    warn: false
-  with_items:
-    - "{{ k8s_packages }}"
-  changed_when: true
-  tags: install
+- name: Collect host facts (including accelerator information)
+  setup: ~
 
 - name: Install infiniBand support
   package:
@@ -91,19 +70,16 @@
     state: present
   tags: install
 
-- name: Start and enable docker service
-  service:
-    name: docker
-    state: restarted
-    enabled: yes
+- name: Deploy time ntp/chrony
+  include_tasks: ntp.yml
   tags: install
 
-- name: Start and enable kubernetes - kubelet
-  service:
-    name: kubelet
-    state: restarted
-    enabled: yes
+- name: Install Nvidia drivers and software components
+  include_tasks: nvidia.yml
+  when: ansible_local.inventory.nvidia_gpu > 0
+  tags: install
 
-- name: Deploy time ntp/chrony
-  include_tasks: ntp.yml
+- name: Install AMD GPU drivers and software components
+  include_tasks: amd.yml
+  when: ansible_local.inventory.amd_gpu > 0
   tags: install

+ 25 - 25
roles/common/tasks/ntp.yml

@@ -13,28 +13,28 @@
 #  limitations under the License.
 ---
 
-#- name: Deploy ntp servers
-#block:
-#- name: Deploy ntpd
-#package:
-#name: ntp
-#state: present
-#- name: Deploy ntpdate
-#package:
-#name: ntpdate
-#state: present
-#- name: Update ntp servers
-#template:
-#src: ntp.conf.j2
-#dest: "{{ ntp_path }}"
-#owner: root
-#group: root
-#mode: "{{ ntp_mode }}"
-          #backup: yes
-          #notify:
-          #- restart ntpd
-            #- sync ntp clocks
-            #when:  ( ansible_distribution == "CentOS" or   ansible_distribution == "RedHat" ) and ansible_distribution_major_version  < os_higher_version
+  - name: Deploy ntp servers
+    block:
+      - name: Deploy ntpd
+        package:
+          name: ntp
+          state: present
+      - name: Deploy ntpdate
+        package:
+          name: ntpdate
+          state: present
+      - name: Update ntp servers
+        template:
+          src: ntp.conf.j2
+          dest: "{{ ntp_path }}"
+          owner: root
+          group: root
+          mode: "{{ ntp_mode }}"
+          backup: yes
+        notify:
+          - Restart ntpd
+          - Sync ntp clocks
+    when:  ( ansible_distribution == "CentOS" or   ansible_distribution == "RedHat" ) and ansible_distribution_major_version  < os_higher_version
 
   - name: Deploy chrony server
     block:
@@ -51,6 +51,6 @@
           mode: "{{ ntp_mode }}"
           backup: yes
         notify:
-          - restart chrony
-          - sync chrony sources
-    when:  ( ansible_distribution == "CentOS" or   ansible_distribution == "RedHat" ) and ansible_distribution_major_version  > os_version
+          - Restart chrony
+          - Sync chrony sources
+    when:  ( ansible_distribution == "CentOS" or   ansible_distribution == "RedHat" ) and ansible_distribution_major_version  > os_version

+ 37 - 17
roles/compute_gpu/tasks/main.yml

@@ -13,17 +13,44 @@
 #  limitations under the License.
 ---
 
-- name: Add nvidia-docker2 Repo
-  get_url:
-    url: "{{ nvidia_docker_repo_url }}"
-    dest: "{{ nvidia_docker_repo_dest }}"
-  tags: install, testing
-
 - name: Add libnvidia container Repo
-  get_url:
-    url: "{{ nvidia_container_repo_url }}"
-    dest: "{{ nvidia_container_repo_dest }}"
-  tags: install, testing
+  yum_repository:
+    name: libnvidia-container
+    description:  libnvidia-container
+    baseurl: https://nvidia.github.io/libnvidia-container/stable/centos7/$basearch
+    repo_gpgcheck: no
+    gpgcheck: no
+    gpgkey: https://nvidia.github.io/libnvidia-container/gpgkey
+    sslverify: yes
+    sslcacert: /etc/pki/tls/certs/ca-bundle.crt
+    enabled: yes
+  tags: install
+
+- name: Add nvidia-container-runtime Repo
+  yum_repository:
+    name: nvidia-container-runtime
+    description:  nvidia-container-runtime
+    baseurl: https://nvidia.github.io/nvidia-container-runtime/stable/centos7/$basearch
+    repo_gpgcheck: no
+    gpgcheck: no
+    gpgkey: https://nvidia.github.io/nvidia-container-runtime/gpgkey
+    sslverify: yes
+    sslcacert: /etc/pki/tls/certs/ca-bundle.crt
+    enabled: yes
+  tags: install
+
+- name: Add nvidia-docker Repo
+  yum_repository:
+    name: nvidia-docker
+    description:  nvidia-docker
+    baseurl: https://nvidia.github.io/nvidia-docker/centos7/$basearch
+    repo_gpgcheck: no
+    gpgcheck: no
+    gpgkey: https://nvidia.github.io/nvidia-docker/gpgkey
+    enabled: yes
+    sslverify: yes
+    sslcacert: /etc/pki/tls/certs/ca-bundle.crt
+  tags: install
 
 - name: Install nvidia driver and nvidia-docker2
   package:
@@ -52,10 +79,3 @@
     enabled: yes
     daemon_reload: yes
   tags: install
-
-- name: Restart and enable kubernetes - kubelet
-  service:
-    name: kubelet
-    state: restarted
-    enabled: yes
-  tags: install

+ 1 - 2
roles/common/templates/chrony.conf.j2

@@ -38,5 +38,4 @@ leapsectz right/UTC
 logdir /var/log/chrony
 
 # Select which information is logged.
-#log measurements statistics tracking
-
+#log measurements statistics tracking

+ 1 - 3
roles/common/templates/ntp.conf.j2

@@ -11,6 +11,4 @@ server  {{ item }} iburst
 
 includefile /etc/ntp/crypto/pw
 
-keys /etc/ntp/keys
-
-
+keys /etc/ntp/keys

+ 21 - 12
roles/common/vars/main.yml

@@ -19,17 +19,20 @@ common_packages:
   - gcc
   - nfs-utils
   - python3-pip
-  - docker-ce
   - bash-completion
   - nvidia-detect
   - chrony
+  - pciutils
+  - docker-ce
+  - openssl
+
+custom_fact_dir: /etc/ansible/facts.d
 
-k8s_packages:
-  - kubelet-1.16.7
-  - kubeadm-1.16.7
-  - kubectl-1.16.7
+custom_fact_dir_mode: 0755
 
-k8s_repo_dest: /etc/yum.repos.d/
+accelerator_discovery_script_dest: /etc/ansible/facts.d/inventory.fact
+
+accelerator_discovery_script_mode: 0755
 
 elrepo_gpg_key_url: https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
 
@@ -39,12 +42,6 @@ docker_repo_url: https://download.docker.com/linux/centos/docker-ce.repo
 
 docker_repo_dest: /etc/yum.repos.d/docker-ce.repo
 
-k8s_conf_dest: /etc/sysctl.d/
-
-k8s_repo_file_mode: 0644
-
-k8s_conf_file_mode: 0644
-
 chrony_path: "/etc/chrony.conf"
 ntp_path: "/etc/ntp.conf"
 ntp_mode: "0644"
@@ -61,3 +58,15 @@ ntp_servers:
   - 2.centos.pool.ntp.org
 chrony_servers:
   - 2.centos.pool.ntp.org
+
+nvidia_docker_repo_url: https://nvidia.github.io/nvidia-docker/centos7/nvidia-docker.repo
+nvidia_docker_repo_dest: /etc/yum.repos.d/nvidia-docker.repo
+nvidia_container_repo_url: https://nvidia.github.io/libnvidia-container/centos7/libnvidia-container.repo
+nvidia_container_repo_dest: /etc/yum.repos.d/libnvidia-container.repo
+
+nvidia_packages:
+  - kmod-nvidia
+  - nvidia-docker2
+
+daemon_file_dest: /etc/docker/
+daemon_file_mode: 0644

+ 0 - 3
roles/compute_gpu/files/k8s.conf

@@ -1,3 +0,0 @@
-net.bridge.bridge-nf-call-ip6tables = 1
-net.bridge.bridge-nf-call-iptables = 1
-

+ 0 - 8
roles/compute_gpu/files/kubernetes.repo

@@ -1,8 +0,0 @@
-[kubernetes]
-name=Kubernetes
-baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
-enabled=1
-gpgcheck=1
-repo_gpgcheck=1
-gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
-

roles/common/files/k8s.conf → roles/k8s_common/files/k8s.conf


roles/common/files/kubernetes.repo → roles/k8s_common/files/kubernetes.repo


+ 28 - 0
roles/k8s_common/handlers/main.yml

@@ -0,0 +1,28 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Start and Enable docker service
+  service:
+    name: docker
+    state: restarted
+    enabled: yes
+  tags: install
+
+- name: Start and Enable Kubernetes - kubelet
+  service:
+    name: kubelet
+    state: started
+    enabled: yes
+  tags: install

+ 69 - 0
roles/k8s_common/tasks/main.yml

@@ -0,0 +1,69 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Add kubernetes repo
+  yum_repository:
+    name: kubernetes
+    description: kubernetes
+    baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+    enabled: yes
+    gpgcheck: no
+    repo_gpgcheck: no
+    gpgkey:
+      - https://packages.cloud.google.com/yum/doc/yum-key.gpg
+      - https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+  tags: install
+
+- name: Update sysctl to handle incorrectly routed traffic when iptables is bypassed
+  copy:
+    src: k8s.conf
+    dest: "{{ k8s_conf_dest }}"
+    owner: root
+    group: root
+    mode: "{{ k8s_conf_file_mode }}"
+  tags: install
+
+- name: Update sysctl
+  command: /sbin/sysctl --system
+  changed_when: true
+  tags: install
+
+- name: Install k8s packages
+  package:
+    name: "{{ k8s_packages }}"
+    state: present
+  tags: install
+
+- name: Versionlock kubernetes
+  command: "yum versionlock '{{ item }}'"
+  args:
+    warn: false
+  with_items:
+    - "{{ k8s_packages }}"
+  changed_when: true
+  tags: install
+
+- name: Start and enable docker service
+  service:
+    name: docker
+    state: restarted
+    enabled: yes
+  tags: install
+
+- name: Start and enable kubernetes - kubelet
+  service:
+    name: kubelet
+    state: restarted
+    enabled: yes

+ 27 - 0
roles/k8s_common/vars/main.yml

@@ -0,0 +1,27 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+k8s_packages:
+  - kubelet-1.16.7
+  - kubeadm-1.16.7
+  - kubectl-1.16.7
+
+k8s_repo_dest: /etc/yum.repos.d/
+
+k8s_conf_dest: /etc/sysctl.d/
+
+k8s_repo_file_mode: 0644
+
+k8s_conf_file_mode: 0644

+ 6 - 6
roles/firewalld/tasks/main.yml

@@ -40,8 +40,8 @@
     port: "{{ item }}/tcp"
     permanent: yes
     state: enabled
-  with_items: '{{ k8s_worker_ports }}'
-  when: "'compute' in group_names"
+  with_items: '{{ k8s_compute_ports }}'
+  when: "'compute' in group_names and groups['manager'][0] != groups['compute'][0] and groups['compute']|length >= 1"
   tags: firewalld
 
 - name: Open flannel ports on the firewall
@@ -50,7 +50,7 @@
     permanent: yes
     state: enabled
   with_items: "{{ flannel_udp_ports }}"
-  when: k8s_cni == "flannel"
+  when: hostvars['127.0.0.1']['k8s_cni'] == "flannel"
   tags: firewalld
 
 - name: Open calico UDP ports on the firewall
@@ -59,7 +59,7 @@
     permanent: yes
     state: enabled
   with_items: "{{ calico_udp_ports }}"
-  when: k8s_cni == "calico"
+  when: hostvars['127.0.0.1']['k8s_cni'] == "calico"
   tags: firewalld
 
 - name: Open calico TCP ports on the firewall
@@ -68,7 +68,7 @@
     permanent: yes
     state: enabled
   with_items: "{{ calico_tcp_ports }}"
-  when: k8s_cni == "calico"
+  when: hostvars['127.0.0.1']['k8s_cni'] == "calico"
   tags: firewalld
 
 - name: Reload firewalld
@@ -81,4 +81,4 @@
     name: firewalld
     state: stopped
     enabled: no
-  tags: firewalld
+  tags: firewalld

+ 0 - 0
roles/firewalld/vars/main.yml


Some files were not shown because too many files changed in this diff