
Merge pull request #284 from dellhpc/rc-1.0

Omnia v1.0.0
John Lockman, 4 years ago
parent
commit
a7275b9003
100 changed files with 8,252 additions and 669 deletions
  1. 66 0
      .github/workflows/ansible-lint.yml
  2. 1 0
      .metadata/omnia_version
  3. 2 2
      CONTRIBUTING.md
  4. 3 1
      README.md
  5. 8 8
      kubernetes/jupyterhub.yaml
  6. 49 0
      appliance/appliance_config.yml
  7. 20 0
      appliance/inventory.yml
  8. 3 0
      appliance/roles/common/files/daemon.json
  9. 86 0
      appliance/roles/common/tasks/docker_installation.yml
  10. 19 0
      appliance/roles/common/tasks/docker_volume.yml
  11. 25 0
      appliance/roles/common/tasks/internet_validation.yml
  12. 35 0
      appliance/roles/common/tasks/main.yml
  13. 6 9
      slurm/roles/start-slurm-workers/tasks/main.yml
  14. 337 0
      appliance/roles/common/tasks/password_config.yml
  15. 46 0
      appliance/roles/common/tasks/pre_requisite.yml
  16. 94 0
      appliance/roles/common/vars/main.yml
  17. 47 0
      appliance/roles/inventory/files/add_host.yml
  18. 148 0
      appliance/roles/inventory/files/create_inventory.yml
  19. 100 0
      appliance/roles/inventory/tasks/main.yml
  20. 16 0
      appliance/roles/inventory/vars/main.yml
  21. 51 0
      appliance/roles/provision/files/Dockerfile
  22. 469 0
      appliance/roles/provision/files/cobbler_settings
  23. 43 0
      appliance/roles/provision/files/inventory_creation.yml
  24. 121 0
      appliance/roles/provision/files/kickstart.yml
  25. 84 0
      appliance/roles/provision/files/modules.conf
  26. 11 20
      slurm/slurm.yml
  27. 64 0
      appliance/roles/provision/files/temp_centos7.ks
  28. 93 0
      appliance/roles/provision/files/temp_dhcp.template
  29. 19 0
      appliance/roles/provision/files/tftp
  30. 46 0
      appliance/roles/provision/files/tftp.yml
  31. 87 0
      appliance/roles/provision/tasks/check_prerequisites.yml
  32. 30 0
      appliance/roles/provision/tasks/cobbler_image.yml
  33. 56 0
      appliance/roles/provision/tasks/configure_cobbler.yml
  34. 60 0
      appliance/roles/provision/tasks/dhcp_configure.yml
  35. 64 0
      appliance/roles/provision/tasks/firewall_settings.yml
  36. 66 0
      appliance/roles/provision/tasks/main.yml
  37. 166 0
      appliance/roles/provision/tasks/mapping_file.yml
  38. 44 0
      appliance/roles/provision/tasks/mount_iso.yml
  39. 89 0
      appliance/roles/provision/tasks/provision_password.yml
  40. 45 0
      appliance/roles/provision/vars/main.yml
  41. 284 0
      appliance/roles/web_ui/tasks/awx_configuration.yml
  42. 40 0
      appliance/roles/web_ui/tasks/check_awx_status.yml
  43. 22 0
      appliance/roles/web_ui/tasks/clone_awx.yml
  44. 40 0
      appliance/roles/web_ui/tasks/firewall_settings.yml
  45. 64 0
      appliance/roles/web_ui/tasks/install_awx.yml
  46. 34 0
      appliance/roles/web_ui/tasks/install_awx_cli.yml
  47. 76 0
      appliance/roles/web_ui/tasks/main.yml
  48. 85 0
      appliance/roles/web_ui/tasks/ui_accessibility.yml
  49. 69 0
      appliance/roles/web_ui/vars/main.yml
  50. 49 0
      appliance/test/appliance_config_empty.yml
  51. 49 0
      appliance/test/appliance_config_test.yml
  52. 3 0
      appliance/test/provisioned_hosts.yml
  53. 1882 0
      appliance/test/test_common.yml
  54. 2 0
      appliance/test/test_mapping_file
  55. 608 0
      appliance/test/test_provision_cc.yml
  56. 183 0
      appliance/test/test_provision_cdip.yml
  57. 294 0
      appliance/test/test_provision_ndod.yml
  58. 51 0
      appliance/test/test_vars/test_common_vars.yml
  59. 85 0
      appliance/test/test_vars/test_provision_vars.yml
  60. 35 0
      appliance/test/test_vars/test_web_ui_vars.yml
  61. 378 0
      appliance/test/test_web_ui.yml
  62. 40 0
      appliance/tools/passwordless_ssh.yml
  63. 81 0
      appliance/tools/provision_report.yml
  64. 36 0
      appliance/tools/roles/cluster_preperation/tasks/main.yml
  65. 84 0
      appliance/tools/roles/cluster_preperation/tasks/passwordless_ssh.yml
  66. 19 0
      appliance/tools/roles/cluster_preperation/vars/main.yml
  67. 44 0
      appliance/tools/roles/fetch_password/tasks/main.yml
  68. 91 0
      appliance/tools/roles/hpc_cluster_report/tasks/main.yml
  69. 38 0
      appliance/tools/roles/hpc_cluster_report/templates/provision_host_report.j2
  70. 100 0
      docs/FAQ.md
  71. 0 105
      docs/INSTALL.md
  72. 117 0
      docs/INSTALL_OMNIA.md
  73. 189 0
      docs/INSTALL_OMNIA_APPLIANCE.md
  74. 6 0
      docs/INVENTORY
  75. 93 0
      docs/MONITOR_CLUSTERS.md
  76. 0 27
      docs/PREINSTALL.md
  77. 28 0
      docs/PREINSTALL_OMNIA.md
  78. 36 0
      docs/PREINSTALL_OMNIA_APPLIANCE.md
  79. 78 13
      docs/README.md
  80. 1 0
      docs/_config.yml
  81. Binary
      docs/images/omnia-branch-structure.png
  82. Binary
      docs/images/omnia-overview.png
  83. 1 1
      examples/README.md
  84. 3 6
      kubernetes/host_inventory_file
  85. 13 0
      examples/host_inventory_file.ini
  86. 2 0
      examples/mapping_file.csv
  87. 0 55
      kubernetes/kubernetes.yml
  88. 0 3
      kubernetes/roles/common/files/nvidia
  89. 0 21
      kubernetes/roles/common/handlers/main.yml
  90. 0 140
      kubernetes/roles/common/tasks/main.yml
  91. 0 10
      kubernetes/roles/common/vars/main.yml
  92. 0 3
      kubernetes/roles/computeGPU/files/k8s.conf
  93. 0 8
      kubernetes/roles/computeGPU/files/kubernetes.repo
  94. 0 3
      kubernetes/roles/computeGPU/files/nvidia
  95. 0 21
      kubernetes/roles/computeGPU/handlers/main.yml
  96. 0 78
      kubernetes/roles/computeGPU/tasks/main.yml
  97. 0 10
      kubernetes/roles/computeGPU/vars/main.yml
  98. 0 122
      kubernetes/roles/kubeflow/tasks/main.yml
  99. 0 3
      kubernetes/roles/master/files/k8s.conf
  100. 0 0
      kubernetes/roles/master/files/kubernetes.repo

+ 66 - 0
.github/workflows/ansible-lint.yml

@@ -0,0 +1,66 @@
+name: Ansible Lint  # feel free to pick your own name
+
+on:
+  pull_request:
+    branches:
+      - release
+      - devel
+
+jobs:
+  ansible-lint:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    # Important: This sets up your GITHUB_WORKSPACE environment variable
+    - uses: actions/checkout@v2
+
+    - name: ansible-lint 
+      # replace "master" with any valid ref
+      uses: ansible/ansible-lint-action@master
+      with:
+        # [required]
+        # Paths to ansible files (e.g., playbooks, tasks, handlers, etc.)
+        # or valid Ansible directories according to the Ansible role
+        # directory structure.
+        # If you want to lint multiple ansible files, use the following syntax
+        # targets: |
+        #   playbook_1.yml
+        #   playbook_2.yml
+        targets: |
+          /github/workspace/omnia.yml
+          /github/workspace/omnia_config.yml
+          /github/workspace/platforms/jupyterhub.yml
+          /github/workspace/platforms/kubeflow.yml
+          /github/workspace/tools/install_tools.yml
+          /github/workspace/tools/intel_tools.yml
+          /github/workspace/tools/olm.yml
+        # [optional]
+        # Arguments to override a package and its version to be set explicitly.
+        # Must follow the example syntax.
+        #override-deps: |
+        #  ansible==2.9
+        #  ansible-lint==4.2.0
+        # [optional]
+        # Arguments to be passed to the ansible-lint
+
+        # Options:
+        #   -q                    quieter, although not silent output
+        #   -p                    parseable output in the format of pep8
+        #   --parseable-severity  parseable output including severity of rule
+        #   -r RULESDIR           specify one or more rules directories using one or
+        #                         more -r arguments. Any -r flags override the default
+        #                         rules in ansiblelint/rules, unless -R is also used.
+        #   -R                    Use default rules in ansiblelint/rules in addition to
+        #                         any extra
+        #                         rules directories specified with -r. There is no need
+        #                         to specify this if no -r flags are used
+        #   -t TAGS               only check rules whose id/tags match these values
+        #   -x SKIP_LIST          only check rules whose id/tags do not match these
+        #                         values
+        #   --nocolor             disable colored output
+        #   --exclude=EXCLUDE_PATHS
+        #                         path to directories or files to skip. This option is
+        #                         repeatable.
+        #   -c C                  Specify configuration file to use. Defaults to ".ansible-lint"
+        args: "-x 305"

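The workflow skips rule 305 (use of `shell` instead of `command`) via `args: "-x 305"`. Per the `-c` option noted in the comments, ansible-lint also reads a `.ansible-lint` config file by default, so the skip list could live in the repository instead of the workflow; a minimal sketch of such a file (hypothetical, not part of this change):

```yaml
# .ansible-lint (hypothetical) -- equivalent to passing "-x 305" on the CLI
skip_list:
  - '305'   # shell tasks are used deliberately in these playbooks
```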
+ 1 - 0
.metadata/omnia_version

@@ -0,0 +1 @@
+Omnia version 1.0.0

+ 2 - 2
CONTRIBUTING.md

@@ -32,7 +32,7 @@ Contributions to Omnia are made through [Pull Requests (PRs)](https://help.githu
 6. **Create a pull request:** [Create a pull request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request) with a title following this format Issue ###: Description (_i.e., Issue 1023: Reformat testutils_). It is important that you do a good job with the description to make the job of the code reviewer easier. A good description not only reduces review time, but also reduces the probability of a misunderstanding with the pull request.
    * **Important:** When preparing a pull request it is important to stay up-to-date with the project repository. We recommend that you rebase against the upstream repo _frequently_. To do this, use the following commands:
    ```
-   git pull --rebase upstream master #upstream is dellhpc/omnia
+   git pull --rebase upstream devel #upstream is dellhpc/omnia
    git push --force origin <pr-branch-name> #origin is your fork of the repository (e.g., <github_user_name>/omnia.git)
    ```
    * **PR Description:** Be sure to fully describe the pull request. Ideally, your PR description will contain:
@@ -42,7 +42,7 @@ Contributions to Omnia are made through [Pull Requests (PRs)](https://help.githu
       4. How to verify that the changes work correctly.
    
 ## Omnia Branches and Contribution Flow
-The diagram below describes the contribution flow. Omnia has two lifetime branches: `devel` and `master`. The `master` branch is reserved for releases and their associated tags. The `devel` branch is where all development work occurs. The `devel` branch is also the default branch for the project.
+The diagram below describes the contribution flow. Omnia has two lifetime branches: `devel` and `release`. The `release` branch is reserved for releases and their associated tags. The `devel` branch is where all development work occurs. The `devel` branch is also the default branch for the project.
 
 ![Omnia Branch Flowchart](docs/images/omnia-branch-structure.png "Flowchart of Omnia branches")
 

+ 3 - 1
README.md

@@ -1,6 +1,8 @@
 <img src="docs/images/omnia-logo.png" width="500px">
 
-![GitHub](https://img.shields.io/github/license/dellhpc/omnia) ![GitHub issues](https://img.shields.io/github/issues-raw/dellhpc/omnia) ![GitHub release (latest by date including pre-releases)](https://img.shields.io/github/v/release/dellhpc/omnia?include_prereleases) ![GitHub last commit (branch)](https://img.shields.io/github/last-commit/dellhpc/omnia/devel) ![GitHub commits since tagged version](https://img.shields.io/github/commits-since/dellhpc/omnia/omnia-v0.2/devel) 
+![GitHub](https://img.shields.io/github/license/dellhpc/omnia) ![GitHub issues](https://img.shields.io/github/issues-raw/dellhpc/omnia) ![GitHub release (latest by date including pre-releases)](https://img.shields.io/github/v/release/dellhpc/omnia?include_prereleases) ![GitHub last commit (branch)](https://img.shields.io/github/last-commit/dellhpc/omnia/devel) ![GitHub commits since tagged version](https://img.shields.io/github/commits-since/dellhpc/omnia/v1.0.0/devel) 
+
+![GitHub contributors](https://img.shields.io/github/contributors-anon/dellhpc/omnia) ![GitHub forks](https://img.shields.io/github/forks/dellhpc/omnia) ![GitHub Repo stars](https://img.shields.io/github/stars/dellhpc/omnia) ![GitHub all releases](https://img.shields.io/github/downloads/dellhpc/omnia/total)
 
 #### Ansible playbook-based deployment of Slurm and Kubernetes on Dell EMC PowerEdge servers running an RPM-based Linux OS
 

+ 8 - 8
kubernetes/jupyterhub.yaml

@@ -1,4 +1,4 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved. 
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -11,12 +11,12 @@
 #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
-
 ---
-#Playbook for installing JupyterHub v1.1.0 in Omnia
- 
-# Start K8s worker servers
-- hosts: master
-  gather_facts: false
+
+- name: Executing omnia roles
+  hosts: localhost
+  connection: local
   roles:
-    - jupyterhub
+    - common
+    - provision
+    - web_ui

+ 49 - 0
appliance/appliance_config.yml

@@ -0,0 +1,49 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Password used while deploying OS on bare metal servers and for Cobbler UI.
+# The length of the password should be at least 8 characters.
+# The password must not contain -,\, ',"
+provision_password: ""
+
+# Password used for the AWX UI.
+# The length of the password should be at least 8 characters.
+# The password must not contain -,\, ',"
+awx_password: ""
+
+# The nic/ethernet card that needs to be connected to the HPC switch.
+# This nic will be configured by Omnia for the DHCP server.
+# Default value of nic is em1.
+hpc_nic: "em1"
+
+# The nic/ethernet card that will be connected to the public internet.
+# Default value of nic is em2
+public_nic: "em2"
+
+# This is the path where the user has kept the ISO image that needs to be provisioned on the target nodes.
+# The ISO file should be the CentOS7-2009-minimal edition.
+# Other ISO files are not supported.
+iso_file_path: "" 
+
+# The mapping file consists of the MAC address and its respective IP address and hostname.
+# The format of the mapping file should be MAC,hostname,IP and it must be a CSV file.
+# A template for the mapping file exists in omnia/examples and is named mapping_file.csv.
+# This is the path where the user has kept the mapping file for DHCP configurations.
+mapping_file_path: ""
+
+# The dhcp range for assigning IPv4 addresses to the bare metal nodes.
+# Example: 10.1.23.1
+dhcp_start_ip_range: ""
+dhcp_end_ip_range: ""

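For reference, a filled-in `appliance_config.yml` might look like the sketch below. All values are illustrative placeholders; the DHCP range must fall in the subnet of `hpc_nic`, and the passwords must satisfy the length and character rules in the comments above.

```yaml
# Illustrative values only -- replace with site-specific settings
provision_password: "Omnia@123"   # placeholder; 8-30 chars, no - \ ' "
awx_password: "Omnia@123"         # placeholder
hpc_nic: "em1"
public_nic: "em2"
iso_file_path: "/root/CentOS-7-x86_64-Minimal-2009.iso"
mapping_file_path: ""             # leave empty when no mapping file is used
dhcp_start_ip_range: "172.17.0.10"
dhcp_end_ip_range: "172.17.0.100"
```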
+ 20 - 0
appliance/inventory.yml

@@ -0,0 +1,20 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Dynamic Inventory
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  roles:
+    - inventory

+ 3 - 0
appliance/roles/common/files/daemon.json

@@ -0,0 +1,3 @@
+{
+    "bip": "172.18.0.1/16"
+}

+ 86 - 0
appliance/roles/common/tasks/docker_installation.yml

@@ -0,0 +1,86 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Add docker repo
+  get_url:
+    url: "{{ docker_repo_url }}"
+    dest: "{{ docker_repo_dest }}"
+  tags: install
+
+- name: Enable docker edge and test repo
+  ini_file:
+    dest: "{{ docker_repo_dest }}"
+    section: "{{ item }}"
+    option: enabled
+    value: "{{ success }}"
+  with_items: ['docker-ce-test', 'docker-ce-edge']
+  tags: install
+
+- name: Install docker
+  package:
+    name: "{{ container_repo_install }}"
+    state: present
+  become: yes
+  tags: install
+
+- name: Start services
+  service:
+    name: "{{ container_type }}"
+    state: started
+    enabled: yes
+  become: yes
+  tags: install
+
+- name: Uninstall docker-py using pip
+  pip:
+    name: ['docker-py','docker']
+    state: absent
+  tags: install
+
+- name: Install docker using pip
+  pip:
+    name: docker
+    state: present
+  tags: install
+
+- name: Update pip
+  command: pip3 install --upgrade pip
+  changed_when: false
+
+- name: Installation using python3
+  pip:
+    name: "{{ docker_compose }}"
+    executable: pip3
+  tags: install
+
+- name: Versionlock docker
+  command: "yum versionlock '{{ item }}'"
+  args:
+    warn: false
+  with_items:
+    - "{{ container_repo_install }}"
+  changed_when: true
+  tags: install
+
+- name: Configure docker
+  copy:
+    src: daemon.json
+    dest: "{{ daemon_dest }}"
+  tags: install
+
+- name: Restart docker
+  service:
+    name: docker
+    state: restarted

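The versionlock step shells out to `yum versionlock` once per package. Where the `community.general` collection is available (an assumption; this role does not install it), the same pin can be expressed with a module call, which is idempotent and needs no `warn: false`; a sketch:

```yaml
# Sketch: module-based alternative to the "yum versionlock" command loop.
# Assumes the community.general collection is installed.
- name: Versionlock docker packages
  community.general.yum_versionlock:
    name: "{{ container_repo_install }}"
    state: present
```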
+ 19 - 0
appliance/roles/common/tasks/docker_volume.yml

@@ -0,0 +1,19 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Create a docker volume
+  docker_volume:
+    name: "{{ docker_volume_name }}"
+

+ 25 - 0
appliance/roles/common/tasks/internet_validation.yml

@@ -0,0 +1,25 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Internet connectivity status
+  wait_for:
+    host: "{{ hostname }}"
+    port: "{{ port_no }}"
+    state: started
+    delay: "{{ internet_delay }}"
+    timeout: "{{ internet_timeout }}"
+    msg: "{{ internet_status }}"
+  register: internet_value
+  tags: install

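The check is parameterized through role vars (`hostname`, `port_no`, the delay, timeout, and failure message all come from the common role's vars file, which points at github.com:22 by default). On networks where outbound SSH is filtered, the same task file can be reused against another endpoint by overriding those vars; a sketch with illustrative values:

```yaml
# Sketch: reuse internet_validation.yml against an HTTPS endpoint.
# The endpoint and port below are illustrative overrides.
- name: Internet validation via HTTPS
  import_tasks: internet_validation.yml
  vars:
    hostname: download.docker.com
    port_no: 443
```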
+ 35 - 0
appliance/roles/common/tasks/main.yml

@@ -0,0 +1,35 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+- name: Mount Path
+  set_fact:
+    mount_path: "{{ role_path + '/../../..'  }}"
+
+- name: Pre-requisite validation
+  import_tasks: pre_requisite.yml
+
+- name: Internet validation
+  import_tasks: internet_validation.yml
+
+- name: Common packages installation
+  import_tasks: package_installation.yml
+
+- name: Basic Configuration
+  import_tasks: password_config.yml
+
+- name: Docker installation and configuration
+  import_tasks: docker_installation.yml
+
+- name: Docker volume creation
+  import_tasks: docker_volume.yml

+ 6 - 9
slurm/roles/start-slurm-workers/tasks/main.yml

@@ -1,4 +1,4 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved. 
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -11,13 +11,10 @@
 #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
-
 ---
-- name: Install SLURM RPMs on compute
-  yum:
-    name: "{{ item }}"
-    #name: "{{ query('fileglob', ['/home/rpms/slurm*20*.rpm']) }}" <-- how it should work to avoid loop
-  with_fileglob:
-    - /home/rpms/slurm*20*.rpm
-  tags: install
 
+- name: Install packages
+  package:
+    name: "{{ common_packages }}"
+    state: present
+  tags: install

+ 337 - 0
appliance/roles/common/tasks/password_config.yml

@@ -0,0 +1,337 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Check input config file is encrypted
+  command: cat {{ input_config_filename }}
+  changed_when: false
+  register: config_content
+
+- name: Decrypt appliance_config.yml
+  command: >-
+    ansible-vault decrypt {{ input_config_filename }}
+    --vault-password-file {{ vault_filename }}
+  changed_when: false
+  when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+
+- name: Include variable file appliance_config.yml
+  include_vars: "{{ input_config_filename }}"
+  no_log: true
+
+- name: Validate input parameters are not empty
+  fail:
+    msg: "{{ input_config_failure_msg }}"
+  register: input_config_check
+  when:
+    - provision_password | length < 1 or
+      awx_password | length < 1 or
+      hpc_nic | length < 1 or
+      public_nic | length < 1 or
+      iso_file_path | length < 1 or
+      dhcp_start_ip_range | length < 1 or
+      dhcp_end_ip_range | length < 1
+
+- name: Save input variables from file
+  set_fact:
+    cobbler_password: "{{ provision_password }}"
+    admin_password: "{{ awx_password }}"
+    nic:  "{{ hpc_nic }}"
+    internet_nic: "{{ public_nic }}"
+    path_for_iso_file: "{{ iso_file_path }}"
+    dhcp_start_ip: "{{ dhcp_start_ip_range | ipv4 }}"
+    dhcp_end_ip: "{{ dhcp_end_ip_range | ipv4 }}"
+    mapping_file: false
+    path_for_mapping_file: "{{ mapping_file_path }}"
+  no_log: true
+
+- name: Get the system hpc ip
+  shell:  "ifconfig {{ hpc_nic }} | grep 'inet' |cut -d: -f2 |  awk '{ print $2}'"
+  register: ip
+  changed_when: false
+
+- name: Get the system public ip
+  shell:  "ifconfig {{ internet_nic }} | grep 'inet' |cut -d: -f2 |  awk '{ print $2}'"
+  register: internet_ip
+  changed_when: false
+
+- name: Get the system netmask
+  shell:  "ifconfig {{ hpc_nic }} | grep 'inet' |cut -d: -f2 |  awk '{ print $4}'"
+  register: net
+  changed_when: false
+
+- name: HPC nic IP
+  set_fact:
+    hpc_ip: "{{ ip.stdout }}"
+    public_ip: "{{ internet_ip.stdout }}"
+
+- name: Netmask
+  set_fact:
+    netmask: "{{ net.stdout }}"
+
+- name: Compute subnet from hpc IP and netmask
+  shell: |
+    IFS=. read -r i1 i2 i3 i4 <<< "{{ hpc_ip }}"
+    IFS=. read -r m1 m2 m3 m4 <<< "{{ netmask }}"
+    printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))"
+  register: sub_result
+  changed_when: false
+
+- name: Subnet
+  set_fact:
+    subnet: "{{ sub_result.stdout }}"
+
+- name: Assert provision_password
+  assert:
+    that:
+      - cobbler_password | length > min_length | int - 1
+      - cobbler_password | length < max_length | int + 1
+      - '"-" not in cobbler_password '
+      - '"\\" not in cobbler_password '
+      - '"\"" not in cobbler_password '
+      - " \"'\" not in cobbler_password "
+    success_msg: "{{ success_msg_provision_password }}"
+    fail_msg: "{{ fail_msg_provision_password }}"
+  no_log: true
+  register: cobbler_password_check
+
+- name: Assert awx_password
+  assert:
+    that:
+        - admin_password | length > min_length | int - 1
+        - admin_password | length < max_length | int + 1
+        - '"-" not in admin_password '
+        - '"\\" not in admin_password '
+        - '"\"" not in admin_password '
+        - " \"'\" not in admin_password "
+    success_msg: "{{ success_msg_awx_password }}"
+    fail_msg: "{{ fail_msg_awx_password }}"
+  no_log: true
+  register: awx_password_check
+
+- name: Assert hpc_ip
+  assert:
+    that:
+      - hpc_ip | length > 7
+    success_msg: "{{ success_hpc_ip }}"
+    fail_msg: "{{ fail_hpc_ip }}"
+  register: hpc_ip_check
+
+- name: Assert public_ip
+  assert:
+    that:
+      - public_ip | length > 7
+    success_msg: "{{ success_hpc_ip }}"
+    fail_msg: "{{ fail_hpc_ip }}"
+  register: public_ip_check
+
+- name: Assert hpc_nic
+  assert:
+    that:
+      - nic | length > nic_min_length | int - 1
+      - nic != internet_nic
+    success_msg: "{{ success_msg_hpc_nic }}"
+    fail_msg: "{{ fail_msg_hpc_nic }}"
+  register: hpc_nic_check
+
+- name: Assert public_nic
+  assert:
+    that:
+      - internet_nic | length > nic_min_length | int - 1
+      - nic != internet_nic
+    success_msg: "{{ success_msg_public_nic }}"
+    fail_msg: "{{ fail_msg_public_nic }}"
+  register: public_nic_check
+
+- name: Assert mapping_file_exists
+  assert:
+    that:
+      - "( mapping_file == true ) or ( mapping_file == false )"
+    success_msg: "{{ success_mapping_file }}"
+    fail_msg: "{{ fail_mapping_file }}"
+
+- name: Set the mapping file value
+  set_fact:
+    mapping_file: true
+  when: path_for_mapping_file != ""
+  
+- name: Assert valid mapping_file_path
+  stat: 
+    path: "{{ path_for_mapping_file }}"
+  when: mapping_file == true
+  register: result_path_mapping_file
+  
+- name: Fail on invalid mapping_file_path
+  fail:
+    msg: "{{ invalid_mapping_file_path }}"
+  when: ( mapping_file == true ) and ( result_path_mapping_file.stat.exists == false )
+
+- name: Assert valid iso_file_path
+  stat:
+    path: "{{ path_for_iso_file }}"
+  register: result_path_iso_file
+
+- name: Fail on incorrect iso_file_path
+  fail:
+    msg: "{{ invalid_iso_file_path }}"
+  when: ( result_path_iso_file.stat.exists == false ) and ( ".iso" not in  path_for_iso_file )
+
+- name: Fail when the iso path exists but is not an ISO file
+  fail:
+    msg: "{{ invalid_iso_file_path }}"
+  when: ( result_path_iso_file.stat.exists == true ) and ( ".iso" not in path_for_iso_file )
+
+- name: Check the subnet of dhcp start range
+  shell: |
+    IFS=. read -r i1 i2 i3 i4 <<< "{{ dhcp_start_ip }}"
+    IFS=. read -r m1 m2 m3 m4 <<< "{{ netmask }}"
+    printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))"
+  args:
+    warn: no
+  register: dhcp_start_sub_result
+  changed_when: false
+  when: dhcp_start_ip != "false"
+
+- name: Set the start dhcp subnet
+  set_fact:
+    dhcp_start_sub: "{{ dhcp_start_sub_result.stdout }}"
+  when: dhcp_start_ip != "false"
+
+- name: Check the subnet of dhcp end range
+  shell: |
+    IFS=. read -r i1 i2 i3 i4 <<< "{{ dhcp_end_ip }}"
+    IFS=. read -r m1 m2 m3 m4 <<< "{{ netmask }}"
+    printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))"
+  register: dhcp_end_sub_result
+  when: dhcp_end_ip != "false"
+  changed_when: false
+
+- name: Set the end dhcp subnet
+  set_fact:
+    dhcp_end_sub: "{{ dhcp_end_sub_result.stdout }}"
+  when: dhcp_end_ip != "false"
+
+- name: Assert dhcp_start_ip_range
+  assert:
+    that:
+      - dhcp_start_ip != "false"
+      - dhcp_start_ip != dhcp_end_ip
+      - dhcp_start_sub == subnet
+      - dhcp_start_sub == dhcp_end_sub
+    success_msg: "{{ success_dhcp_range }}"
+    fail_msg: "{{ fail_dhcp_range }}"
+  register: dhcp_start_ip_check
+
+- name: Assert dhcp_end_ip_range
+  assert:
+    that:
+      - dhcp_end_ip != "false"
+      - dhcp_start_ip != dhcp_end_ip
+      - dhcp_end_sub == subnet
+      - dhcp_start_sub == dhcp_end_sub
+    success_msg: "{{ success_dhcp_range }}"
+    fail_msg: "{{ fail_dhcp_range }}"
+  register: dhcp_end_ip_check
+
+- name: Create ansible vault key
+  set_fact:
+    vault_key: "{{ lookup('password', '/dev/null chars=ascii_letters') }}"
+  when: "'$ANSIBLE_VAULT;' not in config_content.stdout"
+
+- name: Save vault key
+  copy:
+    dest: "{{ vault_filename }}"
+    content: |
+      {{ vault_key }}
+    owner: root
+    force: yes
+  when: "'$ANSIBLE_VAULT;' not in config_content.stdout"
+
+- name: Encrypt input config file
+  command: >-
+    ansible-vault encrypt {{ input_config_filename }}
+    --vault-password-file {{ vault_filename }}
+  changed_when: false
+
+- name: Check if omnia_vault_key exists
+  stat:
+    path: "{{ role_path }}/../../../{{ config_vaultname }}"
+  register: vault_key_result
+
+- name: Create ansible vault key if it does not exist
+  set_fact:
+    vault_key: "{{ lookup('password', '/dev/null chars=ascii_letters') }}"
+  when: not vault_key_result.stat.exists
+
+- name: Save vault key
+  copy:
+    dest: "{{ role_path }}/../../../{{ config_vaultname }}"
+    content: |
+      {{ vault_key }}
+    owner: root
+    force: yes
+  when: not vault_key_result.stat.exists
+
+- name: Check if omnia config file is encrypted
+  command: cat {{ role_path }}/../../../{{ config_filename }}
+  changed_when: false
+  register: config_content
+  no_log: True
+
+- name: Decrypt omnia_config.yml
+  command: >-
+    ansible-vault decrypt {{ role_path }}/../../../{{ config_filename }}
+    --vault-password-file {{ role_path }}/../../../{{ config_vaultname }}
+  when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+
+- name: Include variable file omnia_config.yml
+  include_vars: "{{ role_path }}/../../../{{ config_filename }}"
+  no_log: True
+
+- name: Validate input parameters are not empty
+  fail:
+    msg: "{{ input_config_failure_msg }}"
+  register: input_config_check
+  when:
+    - mariadb_password | length < 1 or
+      k8s_cni | length < 1
+
+- name: Assert mariadb_password
+  assert:
+    that:
+        - mariadb_password | length > min_length | int - 1
+        - mariadb_password | length < max_length | int + 1
+        - '"-" not in mariadb_password '
+        - '"\\" not in mariadb_password '
+        - '"\"" not in mariadb_password '
+        - " \"'\" not in mariadb_password "
+    success_msg: "{{ success_msg_mariadb_password }}"
+    fail_msg: "{{ fail_msg_mariadb_password }}"
+
+- name: Assert kubernetes cni
+  assert:
+    that: "('calico' in k8s_cni) or ('flannel' in k8s_cni)"
+    success_msg: "{{ success_msg_k8s_cni }}"
+    fail_msg: "{{ fail_msg_k8s_cni }}"
+
+- name: Save input variables from file
+  set_fact:
+    db_password: "{{ mariadb_password }}"
+    k8s_cni: "{{ k8s_cni }}"
+  no_log: True
+
+- name: Encrypt input config file
+  command: >-
+    ansible-vault encrypt {{ role_path }}/../../../{{ config_filename }}
+    --vault-password-file {{ role_path }}/../../../{{ config_vaultname }}
+  changed_when: false

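The subnet tasks above compute a network address by ANDing each octet of an IP with the matching octet of the netmask: for example, 10.1.23.5 with netmask 255.255.248.0 yields 10.1.16.0 (since 23 & 248 = 16). Because this file already relies on netaddr-backed filters (`| ipv4`) and `python-netaddr` is in the common packages, the same value could be computed without shelling out; a sketch of the filter-based form:

```yaml
# Sketch: subnet via the ipaddr filter instead of the shell octet-AND.
# "10.1.23.5/255.255.248.0" | ipaddr('network') evaluates to "10.1.16.0".
- name: Subnet
  set_fact:
    subnet: "{{ (hpc_ip + '/' + netmask) | ipaddr('network') }}"
```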
+ 46 - 0
appliance/roles/common/tasks/pre_requisite.yml

@@ -0,0 +1,46 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Set omnia.log file
+  replace:
+    path: /etc/ansible/ansible.cfg
+    regexp: '#log_path = /var/log/ansible.log'
+    replace: 'log_path = /var/log/omnia.log'
+  tags: install
+
+- name: Check OS support
+  fail:
+    msg: "{{ os_status }}"
+  when: not(ansible_distribution == os_name and ansible_distribution_version >= os_version)
+  register: os_value
+  tags: install
+
+- name: Disable SELinux
+  selinux:
+    state: disabled
+  tags: install
+
+- name: Status of SELinux
+  fail:
+    msg: "{{ selinux_status }}"
+  when: ansible_selinux.status != 'disabled'
+  register: selinux_value
+  tags: install
+
+- name: State of firewall
+  service:
+    name: firewalld
+    state: started
+    enabled: yes

+ 94 - 0
appliance/roles/common/vars/main.yml

@@ -0,0 +1,94 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# vars file for common
+
+# Usage: package_installation.yml
+common_packages:
+  - epel-release
+  - yum-utils
+  - git
+  - gcc
+  - gcc-c++
+  - nodejs
+  - device-mapper-persistent-data
+  - bzip2
+  - python2-pip
+  - python3-pip
+  - nano
+  - lvm2
+  - gettext
+  - python-docker
+  - net-tools
+  - python-netaddr
+  - yum-plugin-versionlock
+
+# Usage: pre_requisite.yml
+internet_delay: 0
+internet_timeout: 10
+hostname: github.com
+port_no: 22
+os_name: CentOS
+os_version: '7.9' 
+internet_status: "Failed. No Internet connection. Make sure network is up."
+os_status: "Unsupported OS or OS version. OS should be {{ os_name }} and version should be {{ os_version }} or later"
+selinux_status: "SELinux is not disabled. Disable it in /etc/sysconfig/selinux and reboot the system"
+iso_name: CentOS-7-x86_64-Minimal-2009.iso
+iso_fail: "ISO file not found. Download and copy the ISO file to omnia/appliance/roles/provision/files"
+
+# Usage: docker_installation.yml
+docker_repo_url: https://download.docker.com/linux/centos/docker-ce.repo
+docker_repo_dest: /etc/yum.repos.d/docker-ce.repo
+success: '0'
+container_type: docker
+container_repo_install:
+  - docker-ce-cli-20.10.2
+  - docker-ce-20.10.2
+docker_compose: docker-compose
+daemon_dest: /etc/docker/
+
+# Usage: docker_volume.yml
+docker_volume_name: omnia-storage
+
+# Usage: password_config.yml
+input_config_filename: "appliance_config.yml"
+fail_msg_provision_password: "Failed. Incorrect provision_password format provided in appliance_config.yml file"
+success_msg_provision_password: "provision_password validated"
+fail_msg_awx_password: "Failed. Incorrect awx_password format provided in appliance_config.yml file"
+success_msg_awx_password: "awx_password validated"
+fail_msg_hpc_nic: "Failed. Incorrect hpc_nic format provided in appliance_config.yml file"
+success_msg_hpc_nic: "hpc_nic validated"
+fail_msg_public_nic: "Failed. Incorrect public_nic format provided in appliance_config.yml file"
+success_msg_public_nic: "public_nic validated"
+success_mapping_file: "mapping_file_exists validated"
+fail_mapping_file: "Failed. Incorrect mapping_file_exists value in appliance_config.yml. It should be either true or false"
+input_config_failure_msg: "Please provide all the required parameters in appliance_config.yml"
+success_dhcp_range: "Dhcp_range validated"
+fail_dhcp_range: "Failed. Incorrect range assigned for dhcp"
+success_hpc_ip: "IP validated"
+fail_hpc_ip: "Failed. Nic should be configured"
+fail_mapping_file_path: "Failed. Mapping_file_path input is empty in appliance_config.yml. Either set mapping_file_exists to false or provide a path for a valid mapping file."
+invalid_mapping_file_path: "Incorrect mapping_file_path provided in appliance_config.yml"
+invalid_iso_file_path: "Incorrect iso_file_path provided in appliance_config.yml."
+min_length: 8
+max_length: 30
+nic_min_length: 3
+vault_filename: .vault_key
+config_filename: "omnia_config.yml"
+config_vaultname: .omnia_vault_key
+fail_msg_mariadb_password: "Failed. Incorrect mariadb_password format provided in omnia_config.yml file"
+success_msg_mariadb_password: "mariadb_password validated"
+success_msg_k8s_cni: "Kubernetes CNI Validated"
+fail_msg_k8s_cni: "Failed. Kubernetes CNI is incorrect in omnia_config.yml"

+ 47 - 0
appliance/roles/inventory/files/add_host.yml

@@ -0,0 +1,47 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Check if host already exists
+  command: awk "{{ '/'+ item + '/' }}" /root/inventory
+  register: check_host
+  changed_when: no
+
+- name: Initialise host description
+  set_fact:
+    host_description: "Description Unavailable"
+
+- name: Fetch description
+  set_fact:
+    host_description: "CPU:{{ hostvars[item]['ansible_processor_count'] }}
+    Cores:{{ hostvars[item]['ansible_processor_cores'] }}
+    Memory:{{ hostvars[item]['ansible_memtotal_mb'] }}MB
+    BIOS:{{ hostvars[item]['ansible_bios_version'] }}"
+  when: not check_host.stdout | regex_search(item)
+  ignore_errors: yes
+
+- name: Add host
+  lineinfile:
+    path:  "/root/inventory"
+    line: "    {{ item }}:\n      _awx_description: {{ host_description }}"
+  when:
+    - not check_host.stdout | regex_search(item)
+    - host_description != "Description Unavailable"
+
+- name: Host added msg
+  debug:
+    msg: "{{ host_added_msg + item }}"
+  when:
+    - not check_host.stdout | regex_search(item)
+    - host_description != "Description Unavailable"

+ 148 - 0
appliance/roles/inventory/files/create_inventory.yml

@@ -0,0 +1,148 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Find reachable hosts
+  hosts: all
+  gather_facts: false
+  ignore_unreachable: true
+  ignore_errors: true
+  tasks:
+    - name: Check for reachable nodes
+      command: ping -c1 {{ inventory_hostname }}
+      delegate_to: localhost
+      register: ping_result
+      ignore_errors: yes
+      changed_when: false
+
+    - name: Refresh ssh keys
+      command: ssh-keygen -R {{ inventory_hostname }}
+      delegate_to: localhost
+      changed_when: false
+
+    - name: Group reachable hosts
+      group_by:
+        key: "reachable"
+      when: "'100% packet loss' not in ping_result.stdout"
+
+- name: Get provision password
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+    - name: Include vars file of inventory role
+      include_vars: ../vars/main.yml
+
+- name: Set hostname on reachable nodes and gather facts
+  hosts: reachable
+  gather_facts: False
+  ignore_unreachable: true
+  remote_user: "{{ cobbler_username }}"
+  vars:
+    ansible_password: "{{ cobbler_password }}"
+    ansible_become_pass: "{{ cobbler_password }}"
+    ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+    mapping_file_present: ""
+  tasks:
+    - name: Setup
+      setup:
+       filter: ansible_*
+
+    - name: Check hostname of server
+      command: hostname
+      register: hostname_check
+      changed_when: false
+      ignore_errors: true
+
+    - name: Check if IP present in mapping file
+      command: grep "{{ inventory_hostname }}" ../../provision/files/new_mapping_file.csv
+      delegate_to: localhost
+      register: file_present
+      when: mapping_file | bool == true
+      ignore_errors: true
+
+    - name: Set fact if mapping file present
+      set_fact:
+        mapping_file_present: "{{ file_present.stdout }}"
+      when: mapping_file | bool == true
+      ignore_errors: true
+
+    - name: Get the static hostname from mapping file
+      shell: awk -F',' '$3 == "{{ inventory_hostname }}" { print $2 }' ../../provision/files/new_mapping_file.csv
+      delegate_to: localhost
+      when: ('localhost' in hostname_check.stdout) and (mapping_file_present != "" ) and ( mapping_file | bool == true )
+      register: host_name
+      ignore_errors: true
+
+    - name: Set the hostname from mapping file
+      hostname:
+        name: "{{ host_name.stdout }}"
+      when: ('localhost' in hostname_check.stdout) and (mapping_file_present != "" ) and  (mapping_file | bool == true )
+      ignore_errors: true
+    
+    - name: Set the hostname if hostname not present in mapping file
+      hostname:
+        name: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+      when: ('localhost' in hostname_check.stdout) and (file_present.rc != 0) and (mapping_file | bool == true )
+      ignore_errors: true
+
+    - name: Set the system hostname
+      hostname:
+        name: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+      when: ('localhost' in hostname_check.stdout) and (mapping_file | bool == false)
+      ignore_errors: true
+
+    - name: Add new hostname to /etc/hosts from mapping file
+      lineinfile:
+        dest: /etc/hosts
+        regexp: '^127\.0\.0\.1[ \t]+localhost'
+        line: "127.0.0.1 localhost {{ host_name.stdout }}"
+        state: present
+      when: ('localhost' in hostname_check.stdout) and ( mapping_file_present != "" ) and ( mapping_file | bool == true )
+      ignore_errors: true
+
+    - name: Add new hostname to /etc/hosts if hostname not present in mapping file
+      lineinfile:
+        dest: /etc/hosts
+        regexp: '^127\.0\.0\.1[ \t]+localhost'
+        line: "127.0.0.1 localhost compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+        state: present
+      when: ('localhost' in hostname_check.stdout) and ( file_present.rc != 0 ) and ( mapping_file | bool == true )
+      ignore_errors: true
+
+    - name: Add new hostname to /etc/hosts
+      lineinfile:
+        dest: /etc/hosts
+        regexp: '^127\.0\.0\.1[ \t]+localhost'
+        line: "127.0.0.1 localhost compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
+        state: present
+      when: ('localhost' in hostname_check.stdout) and (mapping_file | bool == false )
+      ignore_errors: true
+
+- name: Update inventory
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+    - name: Update inventory file
+      block:
+        - name: Fetch facts and add new hosts
+          include_tasks: add_host.yml
+          with_items: "{{ groups['reachable'] }}"
+      when: "'reachable' in groups"
+
+    - name: Show unreachable hosts
+      debug:
+        msg: "{{ host_unreachable_msg }} + {{ groups['ungrouped'] }}"
+      when: "'ungrouped' in groups"

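When no mapping file supplies a hostname, the play derives one from the last two octets of the node's IP, so a node reachable as 172.17.0.114 is renamed compute0-114. A quick way to preview that mapping for a host (illustrative task, not part of the play):

```yaml
# Sketch: show the hostname the play would assign to each reachable node.
- name: Preview derived hostname
  debug:
    msg: "compute{{ inventory_hostname.split('.')[-2] + '-' + inventory_hostname.split('.')[-1] }}"
```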
+ 100 - 0
appliance/roles/inventory/tasks/main.yml

@@ -0,0 +1,100 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Set Facts
+  set_fact:
+    ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+
+- name: Check if provisioned host file exists
+  stat:
+    path: "{{ role_path }}/files/provisioned_hosts.yml"
+  register: provisioned_file_result
+
+- name: Include vars file of common role
+  include_vars: "{{ role_path }}/../common/vars/main.yml"
+  no_log: True
+
+- name: Include vars file of web_ui role
+  include_vars: "{{ role_path }}/../web_ui/vars/main.yml"
+  no_log: True
+
+- name: Update inventory file
+  block:
+    - name: Check if input config file is encrypted
+      command: cat {{ input_config_filename }}
+      changed_when: false
+      register: config_content
+
+    - name: Decrypt appliance_config.yml
+      command: >-
+        ansible-vault decrypt {{ input_config_filename }}
+        --vault-password-file {{ vault_filename }}
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+
+    - name: Include variable file appliance_config.yml
+      include_vars: "{{ input_config_filename }}"
+      no_log: True
+
+    - name: Save input variables from file
+      set_fact:
+        cobbler_password: "{{ provision_password }}"
+        mapping_file: false
+        path_mapping_file: "{{ mapping_file_path }}"
+      no_log: True
+
+    - name: Check the status for mapping file
+      set_fact:
+        mapping_file: true
+      when: path_mapping_file != ""
+
+    - name: Encrypt input config file
+      command: >-
+        ansible-vault encrypt {{ input_config_filename }}
+        --vault-password-file {{ vault_filename }}
+      changed_when: false
+
+    - name: Check if inventory file already exists
+      file:
+        path: "/root/inventory"
+        state: absent
+
+    - name: Create empty inventory file
+      copy:
+        dest:  "/root/inventory"
+        content: |
+          ---
+          all:
+            hosts:
+        owner: root
+        mode: 0775
+
+    - name: Add inventory playbook
+      block:
+        - name: Add hosts with description to inventory file
+          command: >-
+            ansible-playbook -i {{ role_path }}/files/provisioned_hosts.yml
+            {{ role_path }}/files/create_inventory.yml
+            --extra-vars "cobbler_username={{ cobbler_username }} cobbler_password={{ cobbler_password }} mapping_file={{ mapping_file | bool }}"
+          no_log: True
+          register: register_error
+      rescue:
+        - name: Fail if host addition was not successful
+          fail:
+            msg: "{{ register_error.stderr + register_error.stdout | regex_replace(cobbler_username) | regex_replace(cobbler_password) }}"
+
+  when: provisioned_file_result.stat.exists
+
+- name: Push inventory to AWX
+  command: awx-manage inventory_import --inventory-name {{ omnia_inventory_name }} --source /root/inventory
+  when: provisioned_file_result.stat.exists

+ 16 - 0
appliance/roles/inventory/vars/main.yml

@@ -0,0 +1,16 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+host_added_msg: "Added host to inventory: "
+host_unreachable_msg: "Following hosts are unreachable: "

+ 51 - 0
appliance/roles/provision/files/Dockerfile

@@ -0,0 +1,51 @@
+FROM centos:7
+
+# RPM REPOs
+RUN yum install -y \
+    epel-release \
+    && yum clean all \
+    && rm -rf /var/cache/yum
+
+RUN yum update -y \
+    && yum clean all \
+    && rm -rf /var/cache/yum
+
+RUN yum install -y \
+  cobbler \
+  cobbler-web \
+  ansible \
+  pykickstart \
+  cronie \
+  debmirror \
+  curl \
+  rsync \
+  httpd \
+  dhcp \
+  xinetd \
+  net-tools \
+  memtest86+ \
+  && yum clean all \
+  &&  rm -rf /var/cache/yum
+
+RUN mkdir /root/omnia
+
+#Copy Configuration files
+COPY settings /etc/cobbler/settings
+COPY dhcp.template  /etc/cobbler/dhcp.template
+COPY modules.conf  /etc/cobbler/modules.conf
+COPY tftp /etc/xinetd.d/tftp
+COPY .users.digest /etc/cobbler/users.digest
+COPY kickstart.yml /root
+COPY tftp.yml /root
+COPY inventory_creation.yml /root
+COPY centos7.ks /var/lib/cobbler/kickstarts
+
+EXPOSE 69 80 443 25151
+
+VOLUME [ "/var/www/cobbler", "/var/lib/cobbler/backup", "/mnt" ]
+
+RUN systemctl enable cobblerd
+RUN systemctl enable httpd
+RUN systemctl enable rsyncd
+
+CMD ["sbin/init"]

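Since the image enables services with `systemctl` and boots `sbin/init`, the resulting container runs systemd and needs the usual systemd-in-docker accommodations. A sketch of how such a container might be started with the `docker_container` module (the image tag, mount, and flags are typical assumptions, not taken from this change):

```yaml
# Sketch: run a systemd-based cobbler image. Values are illustrative.
- name: Run cobbler container
  docker_container:
    name: cobbler
    image: cobbler:latest          # hypothetical tag
    privileged: yes                # systemd inside the container needs this
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    published_ports:
      - "69:69/udp"                # tftp
      - "80:80"                    # cobbler web / http
```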
+ 469 - 0
appliance/roles/provision/files/cobbler_settings

@@ -0,0 +1,469 @@
+---
+# cobbler settings file
+# restart cobblerd and run "cobbler sync" after making changes
+# This config file is in YAML 1.0 format
+# see http://yaml.org
+# ==========================================================
+# if 1, cobbler will allow insertions of system records that duplicate
+# the --dns-name information of other system records.  In general,
+# this is undesirable and should be left 0.
+allow_duplicate_hostnames: 0
+
+# if 1, cobbler will allow insertions of system records that duplicate
+# the ip address information of other system records.  In general,
+# this is undesirable and should be left 0.
+allow_duplicate_ips: 0
+
+# if 1, cobbler will allow insertions of system records that duplicate
+# the mac address information of other system records.  In general,
+# this is undesirable.
+allow_duplicate_macs: 0
+
+# if 1, cobbler will allow settings to be changed dynamically without
+# a restart of the cobblerd daemon. You can only change this variable
+# by manually editing the settings file, and you MUST restart cobblerd
+# after changing it.
+allow_dynamic_settings: 0
+
+# by default, installs are *not* set to send installation logs to the cobbler
+# server.  With 'anamon_enabled', kickstart templates may use the pre_anamon
+# snippet to allow remote live monitoring of their installations from the
+# cobbler server.  Installation logs will be stored under
+# /var/log/cobbler/anamon/.  NOTE: This does allow an xmlrpc call to send logs
+# to this directory, without authentication, so enable only if you are
+# ok with this limitation.
+anamon_enabled: 0
+
+# If using authn_pam in the modules.conf, this can be configured
+# to change the PAM service authentication will be tested against.
+# The default value is "login".
+authn_pam_service: "login"
+
+# How long the authentication token is valid for, in seconds
+auth_token_expiration: 3600
+
+# Email out a report when cobbler finishes installing a system.
+# enabled: set to 1 to turn this feature on
+# sender: optional
+# email: which addresses to email
+# smtp_server: used to specify another server for an MTA
+# subject: use the default subject unless overridden
+build_reporting_enabled: 0
+build_reporting_sender: ""
+build_reporting_email: [ 'root@localhost' ]
+build_reporting_smtp_server: "localhost"
+build_reporting_subject: ""
+build_reporting_ignorelist: [ "" ]
+
+# Cheetah-language kickstart templates can import Python modules.
+# while this is a useful feature, it is not safe to allow them to
+# import anything they want. This whitelists which modules can be
+# imported through Cheetah.  Users can expand this as needed but
+# should never allow modules such as subprocess or those that
+# allow access to the filesystem as Cheetah templates are evaluated
+# by cobblerd as code.
+cheetah_import_whitelist:
+ - "random"
+ - "re"
+ - "time"
+
+# Default createrepo_flags to use for new repositories. If you have
+# createrepo >= 0.4.10, consider "-c cache --update -C", which can
+# dramatically improve your "cobbler reposync" time.  "-s sha"
+# enables working with Fedora repos from F11/F12 from EL-4 or
+# EL-5 without python-hashlib installed (which is not available
+# on EL-4)
+createrepo_flags: "-c cache -s sha"
+
+# if no kickstart is specified to profile add, use this template
+default_kickstart: /var/lib/cobbler/kickstarts/default.ks
+
+# configure all installed systems to use these nameservers by default
+# unless defined differently in the profile.  For DHCP configurations
+# you probably do /not/ want to supply this.
+default_name_servers: []
+
+# if using the authz_ownership module (see the Wiki), objects
+# created without specifying an owner are assigned to this
+# owner and/or group.  Can be a comma separated list.
+default_ownership:
+ - "admin"
+
+# cobbler has various sample kickstart templates stored
+# in /var/lib/cobbler/kickstarts/.  This controls
+# what install (root) password is set up for those
+# systems that reference this variable.  The factory
+# default is "cobbler" and cobbler check will warn if
+# this is not changed.
+# The simplest way to change the password is to run
+# openssl passwd -1
+# and put the output between the "" below.
+default_password_crypted: "password"
+
+# the default template type to use in the absence of any
+# other detected template. If you do not specify the template
+# with '#template=<template_type>' on the first line of your
+# templates/snippets, cobbler will assume try to use the
+# following template engine to parse the templates.
+#
+# Current valid values are: cheetah, jinja2
+default_template_type: "cheetah"
+
+# for libvirt based installs in koan, if no virt bridge
+# is specified, which bridge do we try?  For EL 4/5 hosts
+# this should be xenbr0, for all versions of Fedora, try
+# "virbr0".  This can be overriden on a per-profile
+# basis or at the koan command line though this saves
+# typing to just set it here to the most common option.
+default_virt_bridge: xenbr0
+
+# use this as the default disk size for virt guests (GB)
+default_virt_file_size: 5
+
+# use this as the default memory size for virt guests (MB)
+default_virt_ram: 512
+
+# if koan is invoked without --virt-type and no virt-type
+# is set on the profile/system, what virtualization type
+# should be assumed?  Values: xenpv, xenfv, qemu, vmware
+# (NOTE: this does not change what virt_type is chosen by import)
+default_virt_type: xenpv
+
+# enable gPXE booting? Enabling this option will cause cobbler
+# to copy the undionly.kpxe file to the tftp root directory,
+# and if a profile/system is configured to boot via gpxe it will
+# chain load off pxelinux.0.
+# Default: 0
+enable_gpxe: 0
+
+# controls whether cobbler will add each new profile entry to the default
+# PXE boot menu.  This can be overridden on a per-profile
+# basis when adding/editing profiles with --enable-menu=0/1.  Users
+# should ordinarily leave this setting enabled unless they are concerned
+# with accidental reinstalls from users who select an entry at the PXE
+# boot menu.  Adding a password to the boot menus templates
+# may also be a good solution to prevent unwanted reinstallations
+enable_menu: 1
+
+# enable Func-integration?  This makes sure each installed machine is set up
+# to use func out of the box, which is a powerful way to script and control
+# remote machines.
+# Func lives at http://fedorahosted.org/func
+# read more at https://github.com/cobbler/cobbler/wiki/Func-integration
+# you will need to mirror Fedora/EPEL packages for this feature, so see
+# https://github.com/cobbler/cobbler/wiki/Manage-yum-repos if you want cobbler
+# to help you with this
+func_auto_setup: 0
+func_master: overlord.example.org
+
+# change this port if Apache is not running plaintext on port
+# 80.  Most people can leave this alone.
+http_port: 80
+
+# kernel options that should be present in every cobbler installation.
+# kernel options can also be applied at the distro/profile/system
+# level.
+kernel_options:
+ ksdevice: link
+ lang: 'en_US '
+ text: ~
+
+# s390 systems require additional kernel options in addition to the
+# above defaults
+kernel_options_s390x:
+ RUNKS: 1
+ ramdisk_size: 40000
+ root: /dev/ram0
+ ro: ~
+ ip: off
+ vnc: ~
+
+# configuration options if using the authn_ldap module. See the
+# the Wiki for details.  This can be ignored if you are not using
+# LDAP for WebUI/XMLRPC authentication.
+ldap_server: "ldap.example.com"
+ldap_base_dn: "DC=example,DC=com"
+ldap_port: 389
+ldap_tls: 1
+ldap_anonymous_bind: 1
+ldap_search_bind_dn: ''
+ldap_search_passwd: ''
+ldap_search_prefix: 'uid='
+ldap_tls_cacertfile: ''
+ldap_tls_keyfile: ''
+ldap_tls_certfile: ''
+
+# cobbler has a feature that allows for integration with config management
+# systems such as Puppet.  The following parameters work in conjunction with
+# --mgmt-classes and are described in further detail at:
+# https://github.com/cobbler/cobbler/wiki/Using-cobbler-with-a-configuration-management-system
+mgmt_classes: []
+mgmt_parameters:
+ from_cobbler: 1
+
+# if enabled, this setting ensures that puppet is installed during
+# machine provision, a client certificate is generated and a
+# certificate signing request is made with the puppet master server
+puppet_auto_setup: 0
+
+# when puppet starts on a system after installation it needs to have
+# its certificate signed by the puppet master server. Enabling the
+# following feature will ensure that the puppet server signs the
+# certificate after installation if the puppet master server is
+# running on the same machine as cobbler. This requires
+# puppet_auto_setup above to be enabled
+sign_puppet_certs_automatically: 0
+
+# location of the puppet executable, used for revoking certificates
+puppetca_path: "/usr/bin/puppet"
+
+# when a puppet managed machine is reinstalled it is necessary to
+# remove the puppet certificate from the puppet master server before a
+# new certificate is signed (see above). Enabling the following
+# feature will ensure that the certificate for the machine to be
+# installed is removed from the puppet master server if the puppet
+# master server is running on the same machine as cobbler. This
+# requires puppet_auto_setup above to be enabled
+remove_old_puppet_certs_automatically: 0
+
+# choose a --server argument when running puppetd/puppet agent during kickstart
+#puppet_server: 'puppet'
+
+# let cobbler know that you're using a newer version of puppet
+# choose version 3 to use: 'puppet agent'; version 2 uses status quo: 'puppetd'
+#puppet_version: 2
+
+# choose whether to enable puppet parameterized classes or not.
+# puppet versions prior to 2.6.5 do not support parameters
+#puppet_parameterized_classes: 1
+
+# set to 1 to enable Cobbler's DHCP management features.
+# the choice of DHCP management engine is in /etc/cobbler/modules.conf
+manage_dhcp: 1
+
+# set to 1 to enable Cobbler's DNS management features.
+# the choice of DNS management engine is in /etc/cobbler/modules.conf
+manage_dns: 0
+
+# set to path of bind chroot to create bind-chroot compatible bind
+# configuration files.  This should be automatically detected.
+bind_chroot_path: ""
+
+# set to the ip address of the master bind DNS server for creating secondary
+# bind configuration files
+bind_master: 127.0.0.1
+
+# manage_genders - Bool to enable/disable managing an /etc/genders file for use with pdsh and others.
+manage_genders: 0
+
+# bind_manage_ipmi - used to let bind manage IPMI addresses if the power management address is an IP and if manage_bind is set.
+bind_manage_ipmi: 0
+
+# set to 1 to enable Cobbler's TFTP management features.
+# the choice of TFTP management engine is in /etc/cobbler/modules.conf
+manage_tftpd: 1
+
+# set to 1 to enable Cobbler's RSYNC management features.
+manage_rsync: 0
+
+# if using BIND (named) for DNS management in /etc/cobbler/modules.conf
+# and manage_dns is enabled (above), this lists which zones are managed
+# See the Wiki (https://github.com/cobbler/cobbler/wiki/Dns-management) for more info
+manage_forward_zones: []
+manage_reverse_zones: ['172.17']
+
+# if using cobbler with manage_dhcp, put the IP address
+# of the cobbler server here so that PXE booting guests can find it
+# if you do not set this correctly, this will be manifested in TFTP open timeouts.
+next_server: ip
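+# Omnia's dhcp_configure.yml replaces the "ip" placeholder above with the
+# appliance's hpc_ip, e.g. (illustrative): next_server: 172.17.0.1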
+
+# settings for power management features.  optional.
+# see https://github.com/cobbler/cobbler/wiki/Power-management to learn more
+# choices (refer to codes.py):
+#    apc_snmp bladecenter bullpap drac ether_wake ilo integrity
+#    ipmilan ipmitool lpar rsa virsh wti
+power_management_default_type: 'ipmitool'
+
+# the commands used by the power management module are sourced
+# from what directory?
+power_template_dir: "/etc/cobbler/power"
+
+# if this setting is set to 1, cobbler systems that pxe boot
+# will request at the end of their installation to toggle the
+# --netboot-enabled record in the cobbler system record.  This eliminates
+# the potential for a PXE boot loop if the system is set to PXE
+# first in it's BIOS order.  Enable this if PXE is first in your BIOS
+# boot order, otherwise leave this disabled.   See the manpage
+# for --netboot-enabled.
+pxe_just_once: 1
+
+# the templates used for PXE config generation are sourced
+# from what directory?
+pxe_template_dir: "/etc/cobbler/pxe"
+
+# Path to where system consoles are
+consoles: "/var/consoles"
+
+# Are you using a Red Hat management platform in addition to Cobbler?
+# Cobbler can help you register to it.  Choose one of the following:
+#   "off"    : I'm not using Red Hat Network, Satellite, or Spacewalk
+#   "hosted" : I'm using Red Hat Network
+#   "site"   : I'm using Red Hat Satellite Server or Spacewalk
+# You will also want to read: https://github.com/cobbler/cobbler/wiki/Tips-for-RHN
+redhat_management_type: "off"
+
+# if redhat_management_type is enabled, choose your server
+#   "management.example.org" : For Satellite or Spacewalk
+#   "xmlrpc.rhn.redhat.com"  : For Red Hat Network
+# This setting is also used by the code that supports using Spacewalk/Satellite users/passwords
+# within Cobbler Web and Cobbler XMLRPC.  Using RHN Hosted for this is not supported.
+# This feature can be used even if redhat_management_type is off; you just have
+# to have authn_spacewalk selected in modules.conf
+redhat_management_server: "xmlrpc.rhn.redhat.com"
+
+# specify the default Red Hat authorization key to use to register
+# systems.  If left blank, no registration will be attempted.  Similarly
+# you can set the --redhat-management-key to blank on any system to
+# keep it from trying to register.
+redhat_management_key: ""
+
+# if using authn_spacewalk in modules.conf to let cobbler authenticate
+# against Satellite/Spacewalk's auth system, by default it will not allow per user
+# access into Cobbler Web and Cobbler XMLRPC.
+# in order to permit this, the following setting must be enabled HOWEVER
+# doing so will permit all Spacewalk/Satellite users of certain types to edit all
+# of cobbler's configuration.
+# these roles are:  config_admin and org_admin
+# users should turn this on only if they want this behavior and
+# do not have a cross-multi-org separation concern.  If you have
+# a single org in your satellite, it's probably safe to turn this
+# on and then you can use CobblerWeb alongside a Satellite install.
+redhat_management_permissive: 0
+
+# if set to 1, allows /usr/bin/cobbler-register (part of the koan package)
+# to be used to remotely add new cobbler system records to cobbler.
+# this effectively allows for registration of new hardware from system
+# records.
+register_new_installs: 0
+
+# Flags to use for yum's reposync.  If your version of yum reposync
+# does not support -l, you may need to remove that option.
+reposync_flags: "-l -n -d"
+
+# when DHCP and DNS management are enabled, cobbler sync can automatically
+# restart those services to apply changes.  The exception for this is
+# if using ISC for DHCP, then omapi eliminates the need for a restart.
+# omapi, however, is experimental and not recommended for most configurations.
+# If DHCP and DNS are going to be managed, but hosted on a box that
+# is not on this server, disable restarts here and write some other
+# script to ensure that the config files get copied/rsynced to the destination
+# box.  This can be done by modifying the restart services trigger.
+# Note that if manage_dhcp and manage_dns are disabled, the respective
+# parameter will have no effect.  Most users should not need to change
+# this.
+restart_dns: 1
+restart_dhcp: 1
+
+# install triggers are scripts in /var/lib/cobbler/triggers/install
+# that are triggered in kickstart pre and post sections.  Any
+# executable script in those directories is run.  They can be used
+# to send email or perform other actions.  They are currently
+# run as root so if you do not need this functionality you can
+# disable it, though this will also disable "cobbler status" which
+# uses a logging trigger to audit install progress.
+run_install_triggers: 1
+
+# enables a trigger which version controls all changes to /var/lib/cobbler
+# when add, edit, or sync events are performed.  This can be used
+# to revert to previous database versions, generate RSS feeds, or for
+# other auditing or backup purposes. "git" and "hg" are currently supported,
+# but git is the recommended SCM for use with this feature.
+scm_track_enabled: 0
+scm_track_mode: "git"
+
+# this is the address of the cobbler server -- as it is used
+# by systems during the install process, it must be the address
+# or hostname of the system as those systems can see the server.
+# if you have a server that appears differently to different subnets
+# (dual homed, etc), you need to read the --server-override section
+# of the manpage for how that works.
+server: ip
+
+# If set to 1, all commands will be forced to use the localhost address
+# instead of the above value. Otherwise, commands like cobbler sync may
+# open a connection to a remote address if one is in the configuration
+# and traceback.
+client_use_localhost: 0
+
+# If set to 1, all commands to the API (not directly to the XMLRPC
+# server) will go over HTTPS instead of plaintext. Be sure to change
+# the http_port setting to the correct value for the web server
+client_use_https: 0
+
+# this is a directory of files that cobbler uses to make
+# templating easier.  See the Wiki for more information.  Changing
+# this directory should not be required.
+snippetsdir: /var/lib/cobbler/snippets
+
+# Normally if a kickstart is specified at a remote location, this
+# URL will be passed directly to the kickstarting system, thus bypassing
+# the usual snippet templating Cobbler does for local kickstart files. If
+# this option is enabled, Cobbler will fetch the file contents internally
+# and serve a templated version of the file to the client.
+template_remote_kickstarts: 0
+
+# should new profiles for virtual machines default to auto booting with the physical host when the physical host reboots?
+# this can be overridden on each profile or system object.
+virt_auto_boot: 1
+
+# cobbler's web directory.  Don't change this setting -- see the
+# Wiki on "relocating your cobbler install" if your /var partition
+# is not large enough.
+webdir: /var/www/cobbler
+
+# cobbler's public XMLRPC listens on this port.  Change this only
+# if absolutely needed, as you'll have to start supplying a new
+# port option to koan if it is not the default.
+xmlrpc_port: 25151
+
+# "cobbler repo add" commands set cobbler up with repository
+# information that can be used during kickstart and is automatically
+# set up in the cobbler kickstart templates.  By default, these
+# are only available at install time.  To make these repositories
+# usable on installed systems (since cobbler makes a very convenient
+# mirror), set this to 1.  Most users can safely set this to 1.  Users
+# who have a dual homed cobbler server, or are installing laptops that
+# will not always have access to the cobbler server may wish to leave
+# this as 0.  In that case, the cobbler mirrored yum repos are still
+# accessible at http://cobbler.example.org/cblr/repo_mirror and yum
+# configuration can still be done manually.  This is just a shortcut.
+yum_post_install_mirror: 1
+
+# the default yum priority for all the distros.  This is only used
+# if yum-priorities plugin is used.  1=maximum.  Tweak with caution.
+yum_distro_priority: 1
+
+# Flags to use for yumdownloader.  Not all versions may support
+# --resolve.
+yumdownloader_flags: "--resolve"
+
+# sort and indent JSON output to make it more human-readable
+serializer_pretty_json: 0
+
+# replication rsync options for distros, kickstarts, snippets set to override default value of "-avzH"
+replicate_rsync_options: "-avzH"
+
+# replication rsync options for repos set to override default value of "-avzH"
+replicate_repo_rsync_options: "-avzH"
+
+# always write DHCP entries, regardless if netboot is enabled
+always_write_dhcp_entries: 0
+
+# external proxy - used by: get-loaders, reposync, signature update
+# eg: proxy_url_ext: "http://192.168.1.1:8080"
+proxy_url_ext: ""
+
+# internal proxy - used by systems to reach cobbler for kickstarts
+# eg: proxy_url_int: "http://10.0.0.1:8080"
+proxy_url_int: ""
+

+ 43 - 0
appliance/roles/provision/files/inventory_creation.yml

@@ -0,0 +1,43 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+    - name: Read dhcp lease file
+      set_fact:
+        lease_entries: "{{ lookup('file', '/var/lib/dhcpd/dhcpd.leases').split() | unique | select | list }}"
+
+    - name: Filter the leased ips
+      set_fact:
+        lease_ips: "{{ lease_entries | ipv4('address') | to_nice_yaml }}"
+
+    - name: Create the static hosts list from the mapping file
+      shell: awk -F',' 'NR >1{print $3}' omnia/appliance/roles/provision/files/new_mapping_file.csv > static_hosts.yml
+      changed_when: false
+      ignore_errors: true
+
+    - name: Create the dynamic inventory
+      shell: |
+        echo "[all]" >  omnia/appliance/roles/inventory/files/provisioned_hosts.yml
+        echo "{{ lease_ips }}" > temp.txt
+        egrep -o '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' temp.txt >> dynamic_hosts.yml
+      changed_when: false
+      ignore_errors: true
+
+    - name: Final inventory
+      shell: cat dynamic_hosts.yml static_hosts.yml| sort -ur  >> omnia/appliance/roles/inventory/files/provisioned_hosts.yml
+      changed_when: false
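+
+# The generated provisioned_hosts.yml is a flat inventory of the form
+# (illustrative):
+#   [all]
+#   172.17.0.10
+#   172.17.0.11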

+ 121 - 0
appliance/roles/provision/files/kickstart.yml

@@ -0,0 +1,121 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+- name: Initial cobbler setup
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  vars:
+    name_iso: CentOS7
+    distro_name: CentOS7-x86_64
+  tasks:
+  - name: Inside cobbler container
+    debug:
+      msg: "Hiii! I am cobbler"
+
+  - name: Start required services
+    service:
+      name: "{{ item }}"
+      state: started
+    loop:
+      - cobblerd
+      - xinetd
+      - rsyncd
+      - tftp
+      - httpd
+
+  - name: Cobbler get-loaders
+    command: cobbler get-loaders
+    changed_when: false
+
+  - name: Get fence agents
+    package:
+      name: fence-agents
+      state: present
+
+  - name: Comment out dists in /etc/debmirror.conf
+    replace:
+      path: "/etc/debmirror.conf"
+      regexp: "^@dists=\"sid\";"
+      replace: "#@dists=\"sid\";"
+
+  - name: Comment out arches in /etc/debmirror.conf
+    replace:
+      path: "/etc/debmirror.conf"
+      regexp: "^@arches=\"i386\";"
+      replace: "#@arches=\"i386\";"
+
+  - name: Add curl to PATH
+    shell: export PATH="/usr/bin/curl:$PATH"
+    changed_when: false
+
+  - name: Run import command
+    command: cobbler import --arch=x86_64 --path=/mnt --name="{{ name_iso }}"
+    changed_when: false
+
+  - name: Set distro kernel and initrd
+    command: cobbler distro edit --name="{{ distro_name }}" --kernel=/var/www/cobbler/ks_mirror/CentOS7-x86_64/isolinux/vmlinuz --initrd=/var/www/cobbler/ks_mirror/CentOS7-x86_64/isolinux/initrd.img
+    changed_when: false
+
+  - name: Kickstart profile
+    command: cobbler profile edit --name="{{ distro_name }}" --kickstart=/var/lib/cobbler/kickstarts/centos7.ks
+    changed_when: false
+
+  - name: Sync cobbler
+    command: cobbler sync
+    changed_when: false
+  
+  - name: Disable default apache webpage
+    blockinfile:
+      state: present
+      insertafter: '^#insert the content here for disabling the default apache webpage'
+      dest: /etc/httpd/conf/httpd.conf
+      block: |
+        <Directory />
+           Order Deny,Allow
+           Deny from all
+           Options None
+           AllowOverride None
+         </Directory>
+
+  - name: Restart cobbler
+    service:
+      name: cobblerd
+      state: restarted
+ 
+  - name: Restart httpd
+    service:
+      name: httpd
+      state: restarted
+
+  - name: Restart xinetd
+    service:
+      name: xinetd
+      state: restarted
+
+  - name: Restart dhcpd
+    service:
+      name: dhcpd
+      state: restarted
+
+  - name: Add tftp cron job
+    cron:
+      name: Start tftp service
+      minute: "*"
+      job: "ansible-playbook /root/tftp.yml"
+
+  - name: Add inventory cron job
+    cron:
+      name: Create inventory
+      minute: "*/5"
+      job: "ansible-playbook /root/inventory_creation.yml"

+ 84 - 0
appliance/roles/provision/files/modules.conf

@@ -0,0 +1,84 @@
+# cobbler module configuration file
+# =================================
+
+# authentication:
+# what users can log into the WebUI and Read-Write XMLRPC?
+# choices:
+#    authn_denyall    -- no one (default)
+#    authn_configfile -- use /etc/cobbler/users.digest (for basic setups)
+#    authn_passthru   -- ask Apache to handle it (used for kerberos)
+#    authn_ldap       -- authenticate against LDAP
+#    authn_spacewalk  -- ask Spacewalk/Satellite (experimental)
+#    authn_pam        -- use PAM facilities
+#    authn_testing    -- username/password is always testing/testing (debug)
+#    (user supplied)  -- you may write your own module
+# WARNING: this is a security setting, do not choose an option blindly.
+# for more information:
+# https://github.com/cobbler/cobbler/wiki/Cobbler-web-interface
+# https://github.com/cobbler/cobbler/wiki/Security-overview
+# https://github.com/cobbler/cobbler/wiki/Kerberos
+# https://github.com/cobbler/cobbler/wiki/Ldap
+
+[authentication]
+module = authn_configfile
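+# Omnia's provision_password.yml populates /etc/cobbler/users.digest for this
+# module with an htdigest-style entry, e.g. (illustrative):
+#   cobbler:Cobbler:<md5 hex of "cobbler:Cobbler:<password>">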
+
+# authorization:
+# once a user has been cleared by the WebUI/XMLRPC, what can they do?
+# choices:
+#    authz_allowall   -- full access for all authenticated users (default)
+#    authz_ownership  -- use users.conf, but add object ownership semantics
+#    (user supplied)  -- you may write your own module
+# WARNING: this is a security setting, do not choose an option blindly.
+# If you want to further restrict cobbler with ACLs for various groups,
+# pick authz_ownership.  authz_allowall does not support ACLs.  configfile
+# does but does not support object ownership which is useful as an additional
+# layer of control.
+
+# for more information:
+# https://github.com/cobbler/cobbler/wiki/Cobbler-web-interface
+# https://github.com/cobbler/cobbler/wiki/Security-overview
+# https://github.com/cobbler/cobbler/wiki/Web-authorization
+
+[authorization]
+module = authz_allowall
+
+# dns:
+# chooses the DNS management engine if manage_dns is enabled
+# in /etc/cobbler/settings, which is off by default.
+# choices:
+#    manage_bind    -- default, uses BIND/named
+#    manage_dnsmasq -- uses dnsmasq, also must select dnsmasq for dhcp below
+# NOTE: more configuration is still required in /etc/cobbler
+# for more information:
+# https://github.com/cobbler/cobbler/wiki/Dns-management
+
+[dns]
+module = manage_dnsmasq
+
+# dhcp:
+# chooses the DHCP management engine if manage_dhcp is enabled
+# in /etc/cobbler/settings, which is off by default.
+# choices:
+#    manage_isc     -- default, uses ISC dhcpd
+#    manage_dnsmasq -- uses dnsmasq, also must select dnsmasq for dns above
+# NOTE: more configuration is still required in /etc/cobbler
+# for more information:
+# https://github.com/cobbler/cobbler/wiki/Dhcp-management
+
+[dhcp]
+module = manage_isc
+
+# tftpd:
+# chooses the TFTP management engine if manage_tftp is enabled
+# in /etc/cobbler/settings, which is ON by default.
+#
+# choices:
+#    manage_in_tftpd -- default, uses the system's tftp server
+#    manage_tftpd_py -- uses cobbler's tftp server
+#
+
+[tftpd]
+module = manage_in_tftpd
+
+#--------------------------------------------------
+

+ 11 - 20
slurm/slurm.yml

@@ -1,4 +1,4 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved. 
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -12,25 +12,16 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 ---
-#Playbook for installing Slurm on a cluster 
 
-#collect info from everything
-- hosts: all
-
-# Apply Common Installation and Config
-- hosts: cluster
-  gather_facts: false
-  roles:
-    - slurm-common
-
-# Apply Master Config, start services
-- hosts: master
+- name: Start cobbler on reboot
+  hosts: localhost
+  connection: local
   gather_facts: false
-  roles:
-    - slurm-master
+  tasks:
+    - name: Wait for 2 minutes
+      pause:
+        minutes: 2
 
-# Start SLURM workers
-- hosts: compute
-  gather_facts: false
-  roles:
-    - start-slurm-workers
+    - name: Execute cobbler sync in cobbler container
+      command: docker exec cobbler cobbler sync
+      changed_when: true

+ 64 - 0
appliance/roles/provision/files/temp_centos7.ks

@@ -0,0 +1,64 @@
+#version=DEVEL
+
+# Use network installation
+url --url http://ip/cblr/links/CentOS7-x86_64/
+
+# Install OS instead of upgrade
+install
+
+# Use text install
+text
+
+# SELinux configuration
+selinux --disabled
+
+# Firewall configuration
+firewall --disabled
+
+# Do not configure the X Window System
+skipx
+
+# Run the Setup Agent on first boot
+#firstboot --enable
+ignoredisk --only-use=sda
+
+# Keyboard layouts
+keyboard us
+
+# System language
+lang en_US
+
+# Network information
+network  --bootproto=dhcp --device=nic --onboot=on
+
+# Root password
+rootpw --iscrypted password
+
+# System services
+services --enabled="chronyd"
+
+# System timezone
+timezone Asia/Kolkata --isUtc
+
+# System bootloader configuration
+bootloader --location=mbr --boot-drive=sda
+
+# Partition clearing information
+clearpart --all --initlabel --drives=sda
+
+# Clear the Master Boot Record
+zerombr
+
+# Disk Partitioning
+partition /boot/efi --asprimary --fstype=vfat --label EFI  --size=200
+partition /boot     --asprimary --fstype=ext4 --label BOOT --size=500
+partition /         --asprimary --fstype=ext4 --label ROOT --size=4096 --grow
+
+# Reboot after installation
+reboot
+
+%packages
+@core
+net-tools
+%end
+

+ 93 - 0
appliance/roles/provision/files/temp_dhcp.template

@@ -0,0 +1,93 @@
+# ******************************************************************
+# Cobbler managed dhcpd.conf file
+#
+# generated from cobbler dhcp.conf template ($date)
+# Do NOT make changes to /etc/dhcpd.conf. Instead, make your changes
+# in /etc/cobbler/dhcp.template, as /etc/dhcpd.conf will be
+# overwritten.
+#
+# ******************************************************************
+
+ddns-update-style interim;
+
+allow booting;
+allow bootp;
+
+ignore client-updates;
+set vendorclass = option vendor-class-identifier;
+
+option pxe-system-type code 93 = unsigned integer 16;
+
+subnet subnet_mask netmask net_mask {
+option subnet-mask net_mask;
+range dynamic-bootp start end;
+default-lease-time  21600;
+max-lease-time  43200;
+next-server $next_server;
+#insert the static DHCP leases for configuration here
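+## Example of a generated static lease entry (added by mapping_file.yml;
+## illustrative values):
+##  host node001 {
+##    hardware ethernet aa:bb:cc:dd:ee:ff;
+##    fixed-address 172.17.0.10;
+##  }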
+
+
+     class "pxeclients" {
+          match if substring (option vendor-class-identifier, 0, 9) = "PXEClient";
+          if option pxe-system-type = 00:02 {
+                  filename "ia64/elilo.efi";
+          } else if option pxe-system-type = 00:06 {
+                  filename "grub/grub-x86.efi";
+          } else if option pxe-system-type = 00:07 {
+                  filename "grub/grub-x86_64.efi";
+          } else if option pxe-system-type = 00:09 {
+                  filename "grub/grub-x86_64.efi";
+          } else {
+                  filename "pxelinux.0";
+          }
+     }
+
+}
+
+#for dhcp_tag in $dhcp_tags.keys():
+    ## group could be subnet if your dhcp tags line up with your subnets
+    ## or really any valid dhcpd.conf construct ... if you only use the
+    ## default dhcp tag in cobbler, the group block can be deleted for a
+    ## flat configuration
+# group for Cobbler DHCP tag: $dhcp_tag
+group {
+        #for mac in $dhcp_tags[$dhcp_tag].keys():
+            #set iface = $dhcp_tags[$dhcp_tag][$mac]
+    host $iface.name {
+        #if $iface.interface_type == "infiniband":
+        option dhcp-client-identifier = $mac;
+        #else
+        hardware ethernet $mac;
+        #end if
+        #if $iface.ip_address:
+        fixed-address $iface.ip_address;
+        #end if
+        #if $iface.hostname:
+        option host-name "$iface.hostname";
+        #end if
+        #if $iface.netmask:
+        option subnet-mask $iface.netmask;
+        #end if
+        #if $iface.gateway:
+        option routers $iface.gateway;
+        #end if
+        #if $iface.enable_gpxe:
+        if exists user-class and option user-class = "gPXE" {
+            filename "http://$cobbler_server/cblr/svc/op/gpxe/system/$iface.owner";
+        } else if exists user-class and option user-class = "iPXE" {
+            filename "http://$cobbler_server/cblr/svc/op/gpxe/system/$iface.owner";
+        } else {
+            filename "undionly.kpxe";
+        }
+        #else
+        filename "$iface.filename";
+        #end if
+        ## Cobbler defaults to $next_server, but some users
+        ## may like to use $iface.system.server for proxied setups
+        next-server $next_server;
+        ## next-server $iface.next_server;
+    }
+        #end for
+}
+#end for
+

+ 19 - 0
appliance/roles/provision/files/tftp

@@ -0,0 +1,19 @@
+# default: off
+# description: The tftp server serves files using the trivial file transfer \
+#       protocol.  The tftp protocol is often used to boot diskless \
+#       workstations, download configuration files to network-aware printers, \
+#       and to start the installation process for some operating systems.
+service tftp
+{
+        socket_type             = dgram
+        protocol                = udp
+        wait                    = yes
+        user                    = root
+        server                  = /usr/sbin/in.tftpd
+        server_args             = -s /var/lib/tftpboot
+        disable                 = no
+        per_source              = 11
+        cps                     = 100 2
+        flags                   = IPv4
+}
+

+ 46 - 0
appliance/roles/provision/files/tftp.yml

@@ -0,0 +1,46 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Start tftp and dhcp
+  hosts: localhost
+  connection: local
+  tasks:
+    - name: Fetch tftp status
+      command: systemctl is-active tftp
+      args:
+        warn: no
+      register: tftp_status
+      ignore_errors: yes
+      changed_when: false
+
+    - name: Start tftp if inactive state
+      command: systemctl start tftp.service
+      args:
+        warn: no
+      when: "('inactive' in tftp_status.stdout) or ('unknown' in tftp_status.stdout)"
+
+    - name: Fetch dhcp status
+      command: systemctl is-active dhcpd
+      args:
+        warn: no
+      register: dhcp_status
+      ignore_errors: yes
+      changed_when: false
+
+    - name: Start dhcp if inactive state
+      command: systemctl start dhcpd.service
+      args:
+        warn: no
+      when: "('inactive' in dhcp_status.stdout) or ('unknown' in dhcp_status.stdout)"

+ 87 - 0
appliance/roles/provision/tasks/check_prerequisites.yml

@@ -0,0 +1,87 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Initialize variables
+  set_fact:
+    cobbler_container_status: false
+    cobbler_image_status: false
+    cobbler_config_status: false
+    backup_map_status: false
+    new_node_status: false
+  tags: install
+
+- name: Check if any backup file exists
+  block:
+  - name: Check status of backup file
+    stat:
+      path: "{{ role_path }}/files/backup_mapping_file.csv"
+    register: backup_map
+
+  - name: Set status for backup file
+    set_fact:
+      backup_map_status: true
+    when: backup_map.stat.exists == true
+  rescue:
+  - name: Message
+    debug:
+      msg: "All nodes are new"
+      verbosity: 2
+
+- name: Inspect the cobbler image
+  docker_image_info:
+    name: cobbler
+  register: cobbler_image_result
+  tags: install
+
+- name: Check cobbler status on the machine
+  docker_container_info:
+    name: cobbler
+  register: cobbler_result
+  tags: install
+
+- name: Update cobbler image status
+  set_fact:
+    cobbler_image_status: true
+  when: cobbler_image_result.images | length == 1
+  tags: install
+
+- name: Update cobbler container status
+  set_fact:
+    cobbler_container_status: true
+  when: cobbler_result.exists
+  tags: install
+
+- name: Fetch cobbler profile list
+  command: docker exec cobbler cobbler profile list
+  changed_when: false
+  register: cobbler_profile_list
+  ignore_errors: true
+  when: cobbler_container_status == true
+
+- name: Check crontab list
+  command: docker exec cobbler crontab -l
+  changed_when: false
+  register: crontab_list
+  ignore_errors: true
+  when: cobbler_container_status == true
+
+- name: Update cobbler config status
+  set_fact:
+    cobbler_config_status: true
+  when:
+    - cobbler_container_status == true
+    - "'CentOS' in cobbler_profile_list.stdout"
+    - "'* * * * * ansible-playbook /root/tftp.yml' in crontab_list.stdout"
+    - "'5 * * * * ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"

+ 30 - 0
appliance/roles/provision/tasks/cobbler_image.yml

@@ -0,0 +1,30 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Image creation (It may take 5-10 mins)
+  docker_image:
+    name: "{{ docker_image_name }}"
+    tag: "{{ docker_image_tag }}"
+    source: build
+    build:
+      path: "{{ role_path }}/files/"
+      network: host
+    state: present
+  tags: install
+
+- name: Run cobbler container
+  command: "{{ cobbler_run_command }}"
+  changed_when: false
+  tags: install

+ 56 - 0
appliance/roles/provision/tasks/configure_cobbler.yml

@@ -0,0 +1,56 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Delete the cobbler container if it exists
+  docker_container:
+    name: cobbler
+    state: absent
+  tags: install
+  when: cobbler_container_status == true and cobbler_config_status == false
+
+- name: Run cobbler container
+  command: "{{ cobbler_run_command }}"
+  changed_when: false
+  tags: install
+  when: cobbler_container_status == true and cobbler_config_status == false
+
+- name: Configuring cobbler inside container (It may take 5-10 mins)
+  command: docker exec cobbler ansible-playbook /root/kickstart.yml
+  changed_when: false
+  tags: install
+  when: cobbler_config_status == false
+
+- name: Schedule task
+  cron:
+    name: "start cobbler on reboot"
+    special_time: reboot
+    job: "ansible-playbook {{ role_path }}/files/start_cobbler.yml"
+  tags: install
+  when: cobbler_config_status == false
+
+- name: Execute cobbler sync in cobbler container
+  command: docker exec cobbler cobbler sync
+  changed_when: true
+  when: cobbler_config_status == true
+
+- name: Remove the files
+  file:
+    path: "{{ item }}"
+    state: absent
+  with_items:
+    - "{{ role_path }}/files/.users.digest"
+    - "{{ role_path }}/files/dhcp.template"
+    - "{{ role_path }}/files/settings"
+    - "{{ role_path }}/files/centos7.ks"
+    - "{{ role_path }}/files/new_mapping_file.csv.bak"

+ 60 - 0
appliance/roles/provision/tasks/dhcp_configure.yml

@@ -0,0 +1,60 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Create the dhcp template
+  copy:
+    src: "{{ role_path }}/files/temp_dhcp.template"
+    dest: "{{ role_path }}/files/dhcp.template"
+    mode: 0775
+  tags: install
+
+- name: Assign subnet and netmask
+  replace:
+    path: "{{ role_path }}/files/dhcp.template"
+    regexp: '^subnet subnet_mask netmask net_mask {'
+    replace: 'subnet {{ subnet }} netmask {{ netmask }} {'
+  tags: install
+
+- name: Assign netmask
+  replace:
+    path: "{{ role_path }}/files/dhcp.template"
+    regexp: '^option subnet-mask net_mask;'
+    replace: 'option subnet-mask {{ netmask }};'
+
+- name: Assign DHCP range
+  replace:
+    path: "{{ role_path }}/files/dhcp.template"
+    regexp: '^range dynamic-bootp start end;'
+    replace: 'range dynamic-bootp {{ dhcp_start_ip }} {{ dhcp_end_ip }};'
+
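+# With the replacements above, dhcp.template carries concrete values,
+# e.g. (illustrative): range dynamic-bootp 172.17.0.100 172.17.0.200;
+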
+- name: Create the cobbler settings file
+  copy:
+    src: "{{ role_path }}/files/cobbler_settings"
+    dest: "{{ role_path }}/files/settings"
+    mode: 0775
+  tags: install
+
+- name: Assign server ip
+  replace:
+    path: "{{ role_path }}/files/settings"
+    regexp: '^server: ip'
+    replace: 'server: {{ hpc_ip }}'
+
+- name: Assign next server ip
+  replace:
+    path: "{{ role_path }}/files/settings"
+    regexp: '^next_server: ip'
+    replace: 'next_server: {{ hpc_ip }}'
+

+ 64 - 0
appliance/roles/provision/tasks/firewall_settings.yml

@@ -0,0 +1,64 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+#Tasks for modifying firewall configurations for Cobbler
+
+- name: Permit traffic in default zone on port 80/tcp
+  firewalld:
+    port: 80/tcp
+    permanent: yes
+    state: enabled
+  tags: install
+
+- name: Permit traffic in default zone on port 443/tcp
+  firewalld:
+    port: 443/tcp
+    permanent: yes
+    state: enabled
+  tags: install
+
+- name: Permit traffic in default zone for dhcp service
+  firewalld:
+    service: dhcp
+    permanent: yes
+    state: enabled
+  tags: install
+
+- name: Permit traffic in default zone on port 69/tcp
+  firewalld:
+    port: 69/tcp
+    permanent: yes
+    state: enabled
+  tags: install
+
+- name: Permit traffic in default zone on port 69/udp
+  firewalld:
+    port: 69/udp
+    permanent: yes
+    state: enabled
+  tags: install
+
+- name: Permit traffic in default zone on port 4011/udp
+  firewalld:
+    port: 4011/udp
+    permanent: yes
+    state: enabled
+  tags: install
+
+- name: Reload firewalld
+  systemd:
+    name: firewalld
+    state: reloaded
+  tags: install

+ 66 - 0
appliance/roles/provision/tasks/main.yml

@@ -0,0 +1,66 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+#Tasks for Deploying cobbler on the system
+
+- name: Check cobbler status on machine
+  include_tasks: check_prerequisites.yml
+
+- name: Mount iso image
+  import_tasks: mount_iso.yml
+  when: not cobbler_image_status
+
+- name: Modify firewall settings for Cobbler
+  import_tasks: firewall_settings.yml
+  when: not cobbler_container_status
+
+- name: Include common variables
+  include_vars: ../../common/vars/main.yml
+  when: not cobbler_container_status
+
+- name: Internet validation
+  include_tasks: ../../common/tasks/internet_validation.yml
+  when: not cobbler_container_status
+
+- name: Provision password validation
+  import_tasks: provision_password.yml
+  when: not cobbler_image_status
+
+- name: DHCP configuration
+  import_tasks: dhcp_configure.yml
+  when: (not cobbler_image_status) or (backup_map_status == true)
+
+- name: Mapping file validation
+  import_tasks: mapping_file.yml
+  when: ((not cobbler_image_status) and (mapping_file == true)) or (backup_map_status == true)
+
+- name: Cobbler image creation
+  import_tasks: cobbler_image.yml
+  when: not cobbler_container_status
+
+- name: Cobbler configuration
+  import_tasks: configure_cobbler.yml
+
+- name: Cobbler container status message
+  block:
+    - debug:
+        msg: "{{ message_skipped }}"
+        verbosity: 2
+      when: cobbler_container_status
+    - debug:
+        msg: "{{ message_installed }}"
+        verbosity: 2
+      when: not cobbler_container_status
+  tags: install

+ 166 - 0
appliance/roles/provision/tasks/mapping_file.yml

@@ -0,0 +1,166 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
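+# The mapping file is expected to take the form (illustrative values):
+#   MAC,Hostname,IP
+#   aa:bb:cc:dd:ee:ff,node001,172.17.0.10
+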
+- name: Check if file is comma separated
+  shell: awk -F\, '{print NF-1}' {{ path_for_mapping_file }}
+  register: comma_seperated
+  changed_when: false
+  tags: install
+
+- name: Fail if not comma separated
+  fail:
+    msg: "{{ not_comma_seperated }}"
+  when: item != "2"
+  with_items: "{{ comma_seperated.stdout_lines }}"
+  tags: install
+
+- name: Remove blank lines
+  shell:  awk -F, 'length>NF+1' {{ path_for_mapping_file }} > {{ role_path }}/files/new_mapping_file.csv
+  changed_when: false
+  tags: install
+
+- name: Remove blank spaces
+  shell:  sed -i.bak -E 's/(^|,)[[:blank:]]+/\1/g; s/[[:blank:]]+(,|$)/\1/g'  {{ role_path }}/files/new_mapping_file.csv
+  args:
+    warn: no
+  changed_when: false
+  tags: install
+
+- name: Check if header present
+  shell:  awk 'NR==1 { print $1}' {{ role_path }}/files/new_mapping_file.csv
+  register: header
+  changed_when: false
+  tags: install
+
+- name: Fail if header not present
+  fail:
+    msg: "{{ header_fail }}"
+  when: header.stdout !=  valid_header
+
+- name: Count the hostname
+  shell: awk -F',' '{print $2}' {{ role_path }}/files/new_mapping_file.csv | wc -l
+  register: total_hostname
+  changed_when: false
+  tags: install
+
+- name: Count the ip
+  shell: awk -F',' '{print $3}' {{ role_path }}/files/new_mapping_file.csv | wc -l
+  register: total_ip
+  changed_when: false
+  tags: install
+
+- name: Count the macs
+  shell: awk -F',' '{print $1}' {{ role_path }}/files/new_mapping_file.csv | wc -l
+  register: total_mac
+  changed_when: false
+  tags: install
+
+- name: Check for duplicate hostname
+  shell: awk -F',' '{print $2}' {{ role_path }}/files/new_mapping_file.csv | uniq | wc -l
+  register: uniq_hostname
+  changed_when: false
+  tags: install
+
+- name: Check for duplicate ip
+  shell: awk -F',' '{print $3}' {{ role_path }}/files/new_mapping_file.csv | uniq | wc -l
+  register: uniq_ip
+  changed_when: false
+  tags: install
+
+- name: Check for duplicate mac
+  shell: awk -F',' '{print $1}' {{ role_path }}/files/new_mapping_file.csv | uniq | wc -l
+  register: uniq_mac
+  changed_when: false
+  tags: install
+
+- name: Fail if duplicate hosts exist
+  fail:
+    msg: "{{ fail_hostname_duplicate }}"
+  when:  total_hostname.stdout >  uniq_hostname.stdout
+  tags: install
+
+- name: Fail if duplicate ips exist
+  fail:
+    msg: "{{ fail_ip_duplicate }}"
+  when:  total_ip.stdout >  uniq_ip.stdout
+  tags: install
+
+- name: Fail if duplicate mac exist
+  fail:
+    msg: "{{ fail_mac_duplicate }}"
+  when:  total_mac.stdout >  uniq_mac.stdout
+  tags: install
+
+- name: Check if _ or . or space present in hostname
+  shell: awk -F',' '{print $2}' {{ role_path }}/files/new_mapping_file.csv |grep -E -- '_|\.| '
+  register: hostname_result
+  ignore_errors: true
+  changed_when: false
+  tags: install
+
+- name: Fail if _ or . or space present in hostname
+  fail:
+    msg: "{{ hostname_result.stdout + ' :Hostname should not contain _ or . as it will cause error with slurm and K8s'}}"
+  when: hostname_result.stdout != ""
+  tags: install
+
+- name: Compare the file for new nodes
+  block:
+  - name: Find difference from backup file
+    shell: diff {{ role_path }}/files/new_mapping_file.csv {{role_path}}/files/backup_mapping_file.csv| tr -d \>|tr -d \<| grep -E -- ', & :| '
+    register: diff_output
+    when: backup_map_status == true
+
+  - name: Set status for new nodes
+    set_fact:
+      new_node_status: true
+      when: diff_output.stdout != ""
+  rescue:
+  - name: No new nodes
+    debug:
+      msg: "No new nodes to add"
+      verbosity: 2
+
+- name: Fetch input
+  blockinfile:
+    path: "{{ role_path }}/files/dhcp.template"
+    insertafter: '^#insert the static DHCP leases for configuration here'
+    block: |
+      host {{ item.split(',')[1] }} {
+        hardware ethernet {{ item.split(',')[0] }};
+        fixed-address {{ item.split(',')[2] }};
+      }
+    marker: "# {mark} DHCP BLOCK OF {{ item.split(',')[0] }}"
+  with_lines: "{{ remove_header }}"
+  ignore_errors: true
+  when: (not cobbler_image_status) or (new_node_status == true)
+  tags: install
+
+- name: Create a backup file
+  copy:
+    src: "{{ role_path }}/files/new_mapping_file.csv"
+    dest: "{{ role_path }}/files/backup_mapping_file.csv"
+
+- name: Copy the dhcp.template inside container
+  command: docker exec cobbler cp /root/omnia/appliance/roles/provision/files/dhcp.template /etc/cobbler/dhcp.template
+  when:  ( cobbler_container_status == true ) and ( new_node_status == true )
+
+- name: Cobbler sync for adding new nodes
+  command: docker exec cobbler cobbler sync
+  when:  ( cobbler_container_status == true ) and ( new_node_status == true )
+
+- name: Restart dhcpd
+  command: docker exec cobbler systemctl restart dhcpd
+  when:  ( cobbler_container_status == true ) and ( new_node_status == true )
+

+ 44 - 0
appliance/roles/provision/tasks/mount_iso.yml

@@ -0,0 +1,44 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Initialize vars
+  set_fact:
+    mount_check: true
+  tags: install
+
+- name: Create iso directory
+  file:
+    path: "/mnt/{{ iso_path }}"
+    state: directory
+  tags: install
+
+- name: Check mountpoint
+  command: mountpoint /mnt/{{ iso_path }}
+  changed_when: false
+  register: result
+  ignore_errors: yes
+  tags: install
+
+- name: Update mount status
+  set_fact:
+    mount_check: "{{ result.failed }}"
+  tags: install
+
+- name: Mount the iso file
+  command: mount -o loop {{ path_for_iso_file }} /mnt/{{ iso_path }}
+  changed_when: false
+  args:
+    warn: no
+  when: mount_check == true
+  tags: install

+ 89 - 0
appliance/roles/provision/tasks/provision_password.yml

@@ -0,0 +1,89 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Remove old user
+  file:
+    path: "{{ role_path }}/files/.users.digest"
+    state: absent
+  tags: install
+
+- name: Create a new user
+  file:
+    path: "{{ role_path }}/files/.users.digest"
+    state: touch
+    mode: 0644
+  tags: install
+
+- name: Encrypt cobbler password
+  shell: printf "%s:%s:%s" {{ username }} "Cobbler" "{{ cobbler_password }}" | md5sum | awk '{print $1}'
+  changed_when: false
+  register: encrypt_password
+  no_log: true
+  tags: install
+
+- name: Copy cobbler password to cobbler config file
+  shell: printf "%s:%s:%s\n" "{{ username }}" "Cobbler" "{{ encrypt_password.stdout }}" > "{{ role_path }}/files/.users.digest"
+  changed_when: false
+  no_log: true
+  tags: install
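+
+# The digest file now holds one htdigest-style line per user,
+# e.g. (illustrative): cobbler:Cobbler:<md5 hex of "cobbler:Cobbler:<password>">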
+
+- name: Create the kickstart file
+  copy:
+    src: "{{ role_path }}/files/temp_centos7.ks"
+    dest: "{{ role_path }}/files/centos7.ks"
+    mode: 0775
+  tags: install
+
+- name: Configure kickstart file- IP
+  replace:
+    path: "{{ role_path }}/files/centos7.ks"
+    regexp: '^url --url http://ip/cblr/links/CentOS7-x86_64/'
+    replace: url --url http://{{ hpc_ip }}/cblr/links/CentOS7-x86_64/
+  tags: install
+
+- name: Random phrase generation
+  command: openssl rand -base64 12
+  changed_when: false
+  register: prompt_random_phrase
+  tags: install
+  no_log: true
+
+- name: Set random phrase
+  set_fact:
+    random_phrase: "{{ prompt_random_phrase.stdout }}"
+  tags: install
+  no_log: true
+
+- name: Login password
+  command: openssl passwd -1 -salt {{ random_phrase }} {{ cobbler_password }}
+  no_log: true
+  changed_when: false
+  register: login_pass
+  tags: install
+
+- name: Configure kickstart file- Password
+  replace:
+    path: "{{ role_path }}/files/centos7.ks"
+    regexp: '^rootpw --iscrypted password'
+    replace: 'rootpw --iscrypted {{ login_pass.stdout }}'
+  no_log: true
+  tags: install
+
+- name: Configure kickstart file- nic
+  replace:
+    path: "{{ role_path }}/files/centos7.ks"
+    regexp: '^network  --bootproto=dhcp --device=nic --onboot=on'
+    replace: 'network  --bootproto=dhcp --device={{ nic }} --onboot=on'
+  tags: install

+ 45 - 0
appliance/roles/provision/vars/main.yml

@@ -0,0 +1,45 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+# vars file for provision
+
+#Usage: mapping_file.yml
+fail_hostname_duplicate:  "Failed: Duplicate hostname exists. Please verify mapping file again."
+remove_header: awk 'NR > 1 { print }' {{ role_path }}/files/new_mapping_file.csv
+fail_ip_duplicate:  "Failed: Duplicate ip exists. Please verify mapping file again."
+fail_mac_duplicate:  "Failed: Duplicate mac exists. Please verify mapping file again."
+header_fail: "Failed: Header (MAC,Hostname,IP) should be present in the mapping file"
+valid_header: MAC,Hostname,IP
+not_comma_seperated: "Failed: Mapping file should be comma separated."
+
+#Usage: check_prerequisite.yml
+iso_name: CentOS-7-x86_64-Minimal-2009.iso
+iso_fail: "Iso file not found. Download and copy the iso file to omnia/appliance/roles/provision/files"
+
+# Usage: provision_password.yml
+provision_encrypted_dest: ../files/
+username: cobbler
+
+# Usage: cobbler_image.yml
+docker_image_name: cobbler
+docker_image_tag: latest
+cobbler_run_command: docker run -itd --privileged --net=host --restart=always -v {{ mount_path }}:/root/omnia  -v cobbler_www:/var/www/cobbler:Z -v cobbler_backup:/var/lib/cobbler/backup:Z -v /mnt/iso:/mnt:Z -p 69:69/udp -p 81:80 -p 443:443 -p 25151:25151 --name cobbler  cobbler:latest  /sbin/init
+
+# Usage: main.yml
+message_skipped: "Installation Skipped: Cobbler instance is already running in your system"
+message_installed: "Installation Successful"
+
+# Usage: mount_iso.yml
+iso_path: iso

+ 284 - 0
appliance/roles/web_ui/tasks/awx_configuration.yml

@@ -0,0 +1,284 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+# Get Current AWX configuration
+- name: Waiting for 30 seconds for UI components to be accessible
+  wait_for:
+    timeout: 30
+
+- name: Organization list
+  block:
+    - name: Get organization list
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        organizations list -f human
+      register: organizations_list
+      changed_when: no
+      no_log: True
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ organizations_list.stdout | regex_replace(awx_user) | regex_replace(admin_password) }}"
+
+- name: Project list
+  block:
+    - name: Get project list
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        projects list -f human
+      register: projects_list
+      changed_when: no
+      no_log: True
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ projects_list.stdout | regex_replace(awx_user) | regex_replace(admin_password) }}"
+
+- name: Inventory list
+  block:
+    - name: Get inventory list
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        inventory list -f human
+      register: inventory_list
+      changed_when: no
+      no_log: True
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ inventory_list.stdout | regex_replace(awx_user) | regex_replace(admin_password) }}"
+
+- name: Credential list
+  block:
+    - name: Get credentials list
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        credentials list -f human
+      register: credentials_list
+      changed_when: no
+      no_log: True
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ credentials_list.stdout | regex_replace(awx_user) | regex_replace(admin_password) }}"
+
+- name: Template List
+  block:
+    - name: Get template list
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        job_templates list -f human
+      register: job_templates_list
+      changed_when: no
+      no_log: True
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ job_templates_list.stdout | regex_replace(awx_user) | regex_replace(admin_password) }}"
+
+- name: Group names
+  block:
+    - name: If omnia_inventory exists, fetch group names in the inventory
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        groups list --inventory "{{ omnia_inventory_name }}" -f human
+      register: groups_list
+      changed_when: no
+      when: omnia_inventory_name in inventory_list.stdout
+      no_log: True
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ groups_list.stdout | regex_replace(awx_user) | regex_replace(admin_password) }}"
+
+- name: Schedules list
+  block:
+    - name: Get schedules list
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        schedules list -f human
+      register: schedules_list
+      changed_when: no
+      no_log: True
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ schedules_list.stdout | regex_replace(awx_user) | regex_replace(admin_password) }}"
+
+# Delete Default Configurations
+- name: Delete default configurations
+  block:
+    - name: Delete default organization
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        organizations delete "{{ default_org }}"
+      when: default_org in organizations_list.stdout
+      register: register_error
+      no_log: True
+
+    - name: Delete default job template
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        job_templates delete "{{ default_template }}"
+      when: default_template in job_templates_list.stdout
+      register: register_error
+      no_log: True
+
+    - name: Delete default project
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        projects delete "{{ default_projects }}"
+      when: default_projects in projects_list.stdout
+      register: register_error
+      no_log: True
+
+    - name: Delete default credential
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        credentials delete "{{ default_credentials }}"
+      when: default_credentials in credentials_list.stdout
+      register: register_error
+      no_log: True
+
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ register_error.stdout | regex_replace(awx_user) | regex_replace(admin_password) }}"
+
+# Create required configuration if not present
+- name: Create required configurations
+  block:
+    - name: Create organization
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        organizations create --name "{{ organization_name }}"
+      when: organization_name not in organizations_list.stdout
+      register: register_error
+      no_log: True
+
+    - name: Create new project
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        projects create --name "{{ project_name }}" --organization "{{ organization_name }}"
+        --local_path "{{ role_path.split('/')[-4] }}"
+      when: project_name not in projects_list.stdout
+      register: register_error
+      no_log: True
+
+    - name: Create new omnia inventory
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        inventory create --name "{{ omnia_inventory_name }}" --organization "{{ organization_name }}"
+      when: omnia_inventory_name not in inventory_list.stdout
+      register: register_error
+      no_log: True
+
+    - name: Create groups in omnia inventory
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        groups create --name "{{ item }}" --inventory "{{ omnia_inventory_name }}"
+      when: omnia_inventory_name not in inventory_list.stdout or item not in groups_list.stdout
+      register: register_error
+      no_log: True
+      loop: "{{ group_names }}"
+
+    - name: Create credentials for omnia
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        credentials create --name "{{ credential_name }}" --organization "{{ organization_name }}"
+        --credential_type "{{ credential_type }}"
+        --inputs '{"username": "{{ cobbler_username }}", "password": "{{ cobbler_password }}"}'
+      when: credential_name not in credentials_list.stdout
+      register: register_error
+      no_log: True
+
+    - name: DeployOmnia Template
+      block:
+        - name: Create template to deploy omnia
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+            job_templates create
+            --name "{{ omnia_template_name }}"
+            --job_type run
+            --inventory "{{ omnia_inventory_name }}"
+            --project "{{ project_name }}"
+            --playbook "{{ omnia_playbook }}"
+            --verbosity "{{ playbooks_verbosity }}"
+            --ask_skip_tags_on_launch true
+          register: register_error
+          no_log: True
+
+        - name: Associate credential
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+            job_templates associate "{{ omnia_template_name }}"
+            --credential "{{ credential_name }}"
+          register: register_error
+          no_log: True
+
+      when: omnia_template_name not in job_templates_list.stdout
+
+    - name: DynamicInventory template
+      block:
+        - name: Create template to fetch dynamic inventory
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+            job_templates create
+            --name "{{ inventory_template_name }}"
+            --job_type run
+            --inventory "{{ omnia_inventory_name }}"
+            --project "{{ project_name }}"
+            --playbook "{{ inventory_playbook }}"
+            --verbosity "{{ playbooks_verbosity }}"
+            --use_fact_cache true
+          register: register_error
+          no_log: True
+
+        - name: Associate credential
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+            job_templates associate "{{ inventory_template_name }}"
+            --credential "{{ credential_name }}"
+          register: register_error
+          no_log: True
+
+      when: inventory_template_name not in job_templates_list.stdout
+
+    - name: Schedule dynamic inventory template
+      block:
+        - name: Get unified job template list
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+            unified_job_templates list --name "{{ inventory_template_name }}" -f human
+          no_log: True
+          register: unified_job_template_list
+
+        - name: Get job ID
+          set_fact:
+            job_id: "{{ unified_job_template_list.stdout | regex_search('[0-9]+') }}"
+
+        - name: Schedule dynamic inventory job
+          command: >-
+            awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+            schedules create --name "{{ schedule_name }}"
+            --unified_job_template="{{ job_id }}" --rrule="{{ schedule_rule }}"
+          register: register_error
+          no_log: True
+
+      when: schedule_name not in schedules_list.stdout
+
+  rescue:
+    - name: Message
+      fail:
+        msg: "{{ register_error.stdout | regex_replace(awx_user) | regex_replace(admin_password) }}"
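
Every query and mutation in this file follows the same block/rescue shape: run the AWX CLI, register the output, and on failure re-raise it with the username and password scrubbed via `regex_replace`. A condensed sketch of that pattern (the resource name is illustrative):

- name: Query an AWX resource
  block:
    - name: Get resource list
      command: >-
        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
        projects list -f human
      register: resource_list
      changed_when: no
      no_log: True
  rescue:
    - name: Surface the failure without leaking credentials
      fail:
        msg: "{{ resource_list.stdout | regex_replace(awx_user) | regex_replace(admin_password) }}"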

+ 40 - 0
appliance/roles/web_ui/tasks/check_awx_status.yml

@@ -0,0 +1,40 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+#Tasks for verifying if AWX is already installed on the system
+- name: Initialize variables
+  set_fact:
+    awx_status: false
+  tags: install
+
+- name: Check awx_task status on the machine
+  docker_container_info:
+    name: awx_task
+  register: awx_task_result
+  tags: install
+
+- name: Check awx_web status on the machine
+  docker_container_info:
+    name: awx_web
+  register: awx_web_result
+  tags: install
+
+- name: Update awx status
+  set_fact:
+    awx_status: true
+  when:
+    - awx_task_result.exists
+    - awx_web_result.exists
+  tags: install

+ 22 - 0
appliance/roles/web_ui/tasks/clone_awx.yml

@@ -0,0 +1,22 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Clone AWX repo
+  git:
+    repo: "{{ awx_git_repo }}"
+    dest: "{{ awx_repo_path }}"
+    force: yes
+    version: 15.0.0
+  tags: install

+ 40 - 0
appliance/roles/web_ui/tasks/firewall_settings.yml

@@ -0,0 +1,40 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+#Tasks for modifying firewall configurations for AWX
+
+- name: Enable masquerading on the public zone
+  firewalld:
+    masquerade: yes
+    state: enabled
+    permanent: true
+    zone: public
+  tags: install
+
+- name: Add HTTP and HTTPS services to firewalld
+  firewalld:
+    service: "{{ item }}"
+    permanent: true
+    state: enabled
+  with_items:
+    - http
+    - https
+  tags: install
+
+- name: Reload firewalld
+  systemd:
+    name: firewalld
+    state: reloaded
+  tags: install
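
As a quick post-check, the active rules can be listed after the reload; an illustrative verification task (not part of the role):

- name: List firewalld services on the public zone (illustrative)
  command: firewall-cmd --zone=public --list-services
  register: firewalld_services
  changed_when: false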

+ 64 - 0
appliance/roles/web_ui/tasks/install_awx.yml

@@ -0,0 +1,64 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+# Tasks for installing AWX
+
+- name: Change inventory file
+  replace:
+    path: "{{ awx_inventory_path }}"
+    regexp: "{{ item.regexp }}"
+    replace: "{{ item.replace }}"
+  loop:
+    - { name: Project data directory, regexp: "{{ project_data_dir_old }}" , replace: "{{ project_data_dir_new }}" }
+    - { name: Alternate DNS Servers, regexp: "{{ awx_alternate_dns_servers_old }}", replace: "{{ awx_alternate_dns_servers_new }}" }
+    - { name: Credentials, regexp: "{{ admin_password_old }}", replace: "{{ admin_password_new }}"}
+  loop_control:
+    label: "{{ item.name }}"
+  tags: install
+
+- name: Ensure port is 8081
+  lineinfile:
+    path: "{{ awx_inventory_path }}"
+    regexp: "{{ port_old }}"
+    line: "{{ port_new }}"
+    state: present
+  tags: install
+
+- name: Create pgdocker directory
+  file:
+    path: "{{ pgdocker_dir_path }}"
+    state: directory
+    mode: 0775
+  tags: install
+
+- name: Install AWX
+  block:
+    - name: Run AWX install.yml file
+      command: ansible-playbook -i inventory install.yml --extra-vars "admin_password={{ admin_password }}"
+      args:
+        chdir: "{{ awx_installer_path }}"
+      register: awx_installation
+      no_log: True
+
+  rescue:
+    - name: Check AWX status on machine
+      include_tasks: check_awx_status.yml
+
+    - name: Fail if containers are not running
+      fail:
+        msg: "AWX installation failed with error msg:
+        {{ awx_installation.stdout | regex_replace(admin_password) }}."
+      when: not awx_status
+
+  tags: install

+ 34 - 0
appliance/roles/web_ui/tasks/install_awx_cli.yml

@@ -0,0 +1,34 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+# Tasks for installing AWX-CLI
+- name: Add AWX CLI repo
+  block:
+    - name: Get repo
+      get_url:
+        url: "{{ awx_cli_repo }}"
+        dest: "{{ awx_cli_repo_path }}"
+    - name: Disable gpgcheck
+      replace:
+        path: "{{ awx_cli_repo_path }}"
+        regexp: 'gpgcheck=1'
+        replace: 'gpgcheck=0'
+  tags: install
+
+- name: Install AWX-CLI
+  package:
+    name: ansible-tower-cli
+    state: present
+  tags: install
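
A simple way to confirm the CLI installed correctly is to query its version; a minimal, illustrative check:

- name: Verify AWX-CLI is available (illustrative)
  command: awx --version
  register: awx_cli_version
  changed_when: false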

+ 76 - 0
appliance/roles/web_ui/tasks/main.yml

@@ -0,0 +1,76 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+# Tasks for Deploying AWX on the system
+- name: Check AWX status on machine
+  include_tasks: check_awx_status.yml
+  tags: install
+
+- name: Include common variables
+  include_vars: ../../common/vars/main.yml
+  tags: install
+
+- name: Internet validation
+  include_tasks: ../../common/tasks/internet_validation.yml
+  when: not awx_status
+  tags: install
+
+- name: Clone AWX repo
+  include_tasks: clone_awx.yml
+  when: not awx_status
+  tags: install
+
+- name: Modify firewall config
+  include_tasks: firewall_settings.yml
+  when: not awx_status
+  tags: install
+
+- name: Install AWX
+  include_tasks: install_awx.yml
+  when: not awx_status
+  tags: install
+
+- name: Status message
+  block:
+    - debug:
+        msg: "{{ message_skipped }}"
+        verbosity: 2
+      when: awx_status
+    - debug:
+        msg: "{{ message_installed }}"
+        verbosity: 2
+      when: not awx_status
+  tags: install
+
+- name: Internet validation
+  include_tasks: ../../common/tasks/internet_validation.yml
+  tags: install
+
+- name: Install AWX-CLI
+  include_tasks: install_awx_cli.yml
+  tags: install
+
+- name: Check if AWX-UI is accessible
+  include_tasks: ui_accessibility.yml
+  tags: install
+
+- name: Configure AWX
+  block:
+    - include_tasks: awx_configuration.yml
+  rescue:
+    - name: Display msg
+      debug:
+        msg: "{{ conf_fail_msg }}"
+  tags: install

+ 85 - 0
appliance/roles/web_ui/tasks/ui_accessibility.yml

@@ -0,0 +1,85 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+# Check accessibility of AWX-UI
+- name: Re-install if in migrating state
+  block:
+    - name: Wait for AWX UI to be up
+      uri:
+        url: "{{ awx_ip }}"
+        status_code: "{{ return_status }}"
+        return_content: yes
+      register: register_error
+      until: awx_ui_msg in register_error.content
+      retries: 20
+      delay: 15
+      changed_when: no
+      no_log: True
+
+  rescue:
+    - name: Starting rescue
+      debug:
+        msg: "Attempting to re-install AWX"
+
+    - name: Remove old containers
+      docker_container:
+        name: "{{ item }}"
+        state: absent
+      loop:
+        - awx_task
+        - awx_web
+
+    - name: Restart docker
+      service:
+        name: docker
+        state: restarted
+
+    - name: Re-install AWX
+      block:
+        - name: Run AWX install.yml file
+          command: ansible-playbook -i inventory install.yml --extra-vars "admin_password={{ admin_password }}"
+          args:
+            chdir: "{{ awx_installer_path }}"
+          register: awx_installation
+          no_log: True
+
+      rescue:
+        - name: Check AWX status on machine
+          include_tasks: check_awx_status.yml
+
+        - name: Fail if containers are not running
+          fail:
+            msg: "AWX installation failed with error msg:
+             {{ awx_installation.stdout | regex_replace(admin_password) }}."
+          when: not awx_status
+
+    - name: Check if AWX UI is up
+      block:
+        - name: Wait for AWX UI to be up
+          uri:
+            url: "{{ awx_ip }}"
+            status_code: "{{ return_status }}"
+            return_content: yes
+          register: register_error
+          until: awx_ui_msg in register_error.content
+          retries: 30
+          delay: 10
+          changed_when: no
+          no_log: True
+      rescue:
+        - name: Message
+          fail:
+            msg: "{{ register_error | regex_replace(awx_user) | regex_replace(admin_password) }}"
+  tags: install

+ 69 - 0
appliance/roles/web_ui/vars/main.yml

@@ -0,0 +1,69 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+# vars file for web_ui
+
+# Usage: clone_awx.yml
+awx_git_repo: "https://github.com/ansible/awx.git"
+docker_volume: "/var/lib/docker/volumes/{{ docker_volume_name }}"
+awx_repo_path: "{{ docker_volume }}/awx/"
+awx_installer_path: "{{ awx_repo_path }}/installer/"
+
+# Usage: install_awx.yml
+awx_inventory_path: "{{ awx_repo_path }}/installer/inventory"
+pgdocker_dir_path: /var/lib/pgdocker
+project_data_dir_old: "#project_data_dir=/var/lib/awx/projects"
+project_data_dir_new: "project_data_dir= {{ role_path + '/../../../..' }} "
+awx_alternate_dns_servers_old: '#awx_alternate_dns_servers="10.1.2.3,10.2.3.4"'
+awx_alternate_dns_servers_new: 'awx_alternate_dns_servers="8.8.8.8,8.8.4.4"'
+admin_password_old: "admin_password=password"
+admin_password_new: "#admin_password=password"
+port_old: "host_port=80"
+port_new: "host_port=8081"
+
+# Usage: main.yml
+message_skipped: "Installation Skipped: AWX instance is already running on your system"
+message_installed: "Installation Successful"
+awx_ip: http://localhost:8081
+return_status: 200
+awx_ui_msg: "Password Dialog"
+conf_fail_msg: "AWX configuration failed at the last executed task."
+
+# Usage: install_awx_cli.yml
+awx_cli_repo: "https://releases.ansible.com/ansible-tower/cli/ansible-tower-cli-centos7.repo"
+awx_cli_repo_path: "/etc/yum.repos.d/ansible-tower-cli-centos7.repo"
+
+# Usage: awx_configuration.yml
+awx_user: admin         # Do not change; the AWX installer sets the admin username to 'admin'
+default_org: Default
+default_template: 'Demo Job Template'
+default_projects: 'Demo Project'
+default_credentials: 'Demo Credential'
+organization_name: DellEMC
+project_name: omnia
+omnia_inventory_name: omnia_inventory
+group_names:
+  - manager
+  - compute
+credential_name: omnia_credential
+credential_type: Machine
+cobbler_username: root
+omnia_template_name: DeployOmnia
+omnia_playbook: omnia.yml
+inventory_template_name: DynamicInventory
+inventory_playbook: appliance/inventory.yml
+playbooks_verbosity: 0
+schedule_name: DynamicInventorySchedule
+schedule_rule: "DTSTART:20201201T000000Z RRULE:FREQ=MINUTELY;INTERVAL=10"
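
`schedule_rule` uses the iCalendar RRULE syntax that AWX schedules expect: the value above starts the schedule at 2020-12-01 00:00 UTC and repeats it every 10 minutes. An hourly variant, shown purely for illustration, would be:

schedule_rule_hourly: "DTSTART:20201201T000000Z RRULE:FREQ=HOURLY;INTERVAL=1"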

+ 49 - 0
appliance/test/appliance_config_empty.yml

@@ -0,0 +1,49 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Password used while deploying OS on bare metal servers and for Cobbler UI.
+# The length of the password should be at least 8 characters.
+# The password must not contain -, \, ', "
+provision_password: ""
+
+# Password used for the AWX UI.
+# The length of the password should be at least 8 characters.
+# The password must not contain -, \, ', "
+awx_password: ""
+
+# The NIC/Ethernet card that needs to be connected to the HPC switch.
+# This NIC will be configured by Omnia for the DHCP server.
+# The default value is em1.
+hpc_nic: "em1"
+
+# The NIC/Ethernet card that will be connected to the public internet.
+# The default value is em2.
+public_nic: "em2"
+
+# The path where the user has kept the ISO image that needs to be provisioned on the target nodes.
+# The ISO file should be the CentOS 7 2009 minimal edition.
+# Other ISO files are not supported.
+iso_file_path: ""
+
+# The mapping file consists of the MAC address and its respective IP address and hostname.
+# The format of the mapping file should be MAC,Hostname,IP and it must be a CSV file.
+# A template mapping file exists in omnia/examples and is named mapping_file.csv.
+# This is the path where the user has kept the mapping file for DHCP configuration.
+mapping_file_path: ""
+
+# The DHCP range for assigning IPv4 addresses to the bare metal nodes.
+# Example: 10.1.23.1
+dhcp_start_ip_range: ""
+dhcp_end_ip_range: ""

+ 49 - 0
appliance/test/appliance_config_test.yml

@@ -0,0 +1,49 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Password used while deploying OS on bare metal servers and for Cobbler UI.
+# The length of the password should be at least 8 characters.
+# The password must not contain -, \, ', "
+provision_password: "omnia@123"
+
+# Password used for the AWX UI.
+# The length of the password should be at least 8 characters.
+# The password must not contain -, \, ', "
+awx_password: "omnia@123"
+
+# The NIC/Ethernet card that needs to be connected to the HPC switch.
+# This NIC will be configured by Omnia for the DHCP server.
+# The default value is em1.
+hpc_nic: "em1"
+
+# The NIC/Ethernet card that will be connected to the public internet.
+# The default value is em2.
+public_nic: "em2"
+
+# The path where the user has kept the ISO image that needs to be provisioned on the target nodes.
+# The ISO file should be the CentOS 7 2009 minimal edition.
+# Other ISO files are not supported.
+iso_file_path: "/root/CentOS-7-x86_64-Minimal-2009.iso"
+
+# The mapping file consists of the MAC address and its respective IP address and hostname.
+# The format of the mapping file should be MAC,Hostname,IP and it must be a CSV file.
+# A template mapping file exists in omnia/examples and is named mapping_file.csv.
+# This is the path where the user has kept the mapping file for DHCP configuration.
+mapping_file_path: ""
+
+# The DHCP range for assigning IPv4 addresses to the bare metal nodes.
+# Example: 10.1.23.1
+dhcp_start_ip_range: "172.17.0.10"
+dhcp_end_ip_range: "172.17.0.100"

+ 3 - 0
appliance/test/provisioned_hosts.yml

@@ -0,0 +1,3 @@
+[all]
+172.17.0.10
+172.17.0.15

File diff is too large to display
+ 1882 - 0
appliance/test/test_common.yml


+ 2 - 0
appliance/test/test_mapping_file

@@ -0,0 +1,2 @@
+MAC,Hostname,IP
+xx:yy:zz:aa:bb,validation-host21,172.20.0.21

+ 608 - 0
appliance/test/test_provision_cc.yml

@@ -0,0 +1,608 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Testcase OMNIA_DIO_US_CC_TC_004
+# Execute provision role in management station and verify cobbler configuration
+- name: OMNIA_DIO_US_CC_TC_004
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+    - "{{ test_input_config_filename }}"
+  tasks:
+    - name: Check the iso file is present
+      stat:
+        path: "{{ iso_file_path }}"
+      register: iso_status
+      tags: TC_004
+
+    - name: Fail if iso file is missing
+      fail:
+        msg: "{{ iso_fail }}"
+      when: not iso_status.stat.exists
+      tags: TC_004
+
+    - name: Delete the cobbler container if it exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_004
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_004
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+      tags: TC_004
+
+    - name: Check that the connection to the cobbler UI returns status 200
+      uri:
+        url: https://localhost/cobbler_web
+        status_code: 200
+        return_content: yes
+        validate_certs: no
+      tags: TC_004,VERIFY_004
+
+    - name: Fetch cobbler version in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler version
+      changed_when: false
+      register: cobbler_version
+      tags: TC_004,VERIFY_004
+
+    - name: Verify cobbler version
+      assert:
+        that:
+          - "'Cobbler' in cobbler_version.stdout"
+          - "'Error' not in cobbler_version.stdout"
+        fail_msg: "{{ cobbler_version_fail_msg }}"
+        success_msg: "{{ cobbler_version_success_msg }}"
+      tags: TC_004,VERIFY_004
+
+    - name: Run cobbler check command in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler check
+      changed_when: false
+      register: cobbler_check
+      tags: TC_004,VERIFY_004
+
+    - name: Verify cobbler check command output
+      assert:
+        that:
+          - "'The following are potential configuration items that you may want to fix' not in cobbler_check.stdout"
+          - "'Error' not in cobbler_check.stdout"
+        fail_msg: "{{ cobbler_check_fail_msg }}"
+        success_msg: "{{ cobbler_check_success_msg }}"
+      ignore_errors: yes
+      tags: TC_004,VERIFY_004
+
+    - name: Run cobbler sync command in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler sync
+      changed_when: false
+      register: cobbler_sync
+      tags: TC_004,VERIFY_004
+
+    - name: Verify cobbler sync command output
+      assert:
+        that:
+          - "'TASK COMPLETE' in cobbler_sync.stdout"
+          - "'Fail' not in cobbler_sync.stdout"
+          - "'Error' not in cobbler_sync.stdout"
+        fail_msg: "{{ cobbler_sync_fail_msg }}"
+        success_msg: "{{ cobbler_sync_success_msg }}"
+      tags: TC_004,VERIFY_004
+
+    - name: Fetch cobbler distro list
+      command: docker exec {{ docker_container_name }} cobbler distro list
+      changed_when: false
+      register: cobbler_distro_list
+      tags: TC_004,VERIFY_004
+
+    - name: Verify cobbler distro list
+      assert:
+        that:
+          - "'CentOS' in cobbler_distro_list.stdout"
+        fail_msg: "{{ cobbler_distro_list_fail_msg }}"
+        success_msg: "{{ cobbler_distro_list_success_msg }}"
+      tags: TC_004,VERIFY_004
+
+    - name: Fetch cobbler profile list
+      command: docker exec cobbler cobbler profile list
+      changed_when: false
+      register: cobbler_profile_list
+      tags: TC_004,VERIFY_004
+
+    - name: Verify cobbler profile list
+      assert:
+        that:
+          - "'CentOS' in cobbler_profile_list.stdout"
+        fail_msg: "{{ cobbler_profile_list_fail_msg }}"
+        success_msg: "{{ cobbler_profile_list_success_msg }}"
+      tags: TC_004,VERIFY_004
+
+    - name: Check kickstart file
+      shell: |
+        docker exec {{ docker_container_name }} [ -f /var/lib/cobbler/kickstarts/{{ kickstart_filename }} ] && echo "File exist" || echo "File does not exist"
+      changed_when: false
+      register: kickstart_file_status
+      tags: TC_004,VERIFY_004
+
+    - name: Verify kickstart file present
+      assert:
+        that:
+          - "'File exist' in kickstart_file_status.stdout"
+        fail_msg: "{{ kickstart_file_fail_msg }}"
+        success_msg: "{{ kickstart_file_success_msg }}"
+      tags: TC_004,VERIFY_004
+
+    - name: Check crontab list
+      command: docker exec cobbler crontab -l
+      changed_when: false
+      register: crontab_list
+      tags: TC_004,VERIFY_004
+
+    - name: Verify crontab list
+      assert:
+        that:
+          - "'* * * * * ansible-playbook /root/tftp.yml' in crontab_list.stdout"
+          - "'5 * * * * ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"
+        fail_msg: "{{ crontab_list_fail_msg }}"
+        success_msg: "{{ crontab_list_success_msg }}"
+      tags: TC_004,VERIFY_004
+
+    - name: Check that the tftp, dhcpd, xinetd and cobblerd services are running
+      command: docker exec cobbler systemctl is-active {{ item }}
+      changed_when: false
+      ignore_errors: yes
+      register: cobbler_service_check
+      with_items: "{{ cobbler_services }}"
+      tags: TC_004,VERIFY_004
+
+    - name: Verify that the tftp, dhcpd, xinetd and cobblerd services are running
+      assert:
+        that:
+          - "'active' in cobbler_service_check.results[{{ item }}].stdout"
+          - "'inactive' not in cobbler_service_check.results[{{ item }}].stdout"
+          - "'unknown' not in cobbler_service_check.results[{{ item }}].stdout"
+        fail_msg: "{{ cobbler_service_check_fail_msg }}"
+        success_msg: "{{ cobbler_service_check_success_msg }}"
+      with_sequence: start=0 end=3
+      tags: TC_004,VERIFY_004
+
+# Testcase OMNIA_DIO_US_CDIP_TC_005
+# Execute provision role in management station where cobbler container is configured
+- name: OMNIA_DIO_US_CDIP_TC_005
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+      tags: TC_005
+
+    - name: Check that the connection to the cobbler UI returns status 200
+      uri:
+        url: https://localhost/cobbler_web
+        status_code: 200
+        return_content: yes
+        validate_certs: no
+      tags: TC_005,VERIFY_005
+
+    - name: Fetch cobbler version in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler version
+      changed_when: false
+      register: cobbler_version
+      tags: TC_005,VERIFY_005
+
+    - name: Verify cobbler version
+      assert:
+        that:
+          - "'Cobbler' in cobbler_version.stdout"
+          - "'Error' not in cobbler_version.stdout"
+        fail_msg: "{{ cobbler_version_fail_msg }}"
+        success_msg: "{{ cobbler_version_success_msg }}"
+      tags: TC_005,VERIFY_005
+
+    - name: Run cobbler check command in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler check
+      changed_when: false
+      register: cobbler_check
+      tags: TC_005,VERIFY_005
+
+    - name: Verify cobbler check command output
+      assert:
+        that:
+          - "'The following are potential configuration items that you may want to fix' not in cobbler_check.stdout"
+          - "'Error' not in cobbler_check.stdout"
+        fail_msg: "{{ cobbler_check_fail_msg }}"
+        success_msg: "{{ cobbler_check_success_msg }}"
+      ignore_errors: yes
+      tags: TC_005,VERIFY_005
+
+    - name: Run cobbler sync command in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler sync
+      changed_when: false
+      register: cobbler_sync
+      tags: TC_005,VERIFY_005
+
+    - name: Verify cobbler sync command output
+      assert:
+        that:
+          - "'TASK COMPLETE' in cobbler_sync.stdout"
+          - "'Fail' not in cobbler_sync.stdout"
+          - "'Error' not in cobbler_sync.stdout"
+        fail_msg: "{{ cobbler_sync_fail_msg }}"
+        success_msg: "{{ cobbler_sync_success_msg }}"
+      tags: TC_005,VERIFY_005
+
+    - name: Fetch cobbler distro list
+      command: docker exec {{ docker_container_name }} cobbler distro list
+      changed_when: false
+      register: cobbler_distro_list
+      tags: TC_005,VERIFY_005
+
+    - name: Verify cobbler distro list
+      assert:
+        that:
+          - "'CentOS' in cobbler_distro_list.stdout"
+        fail_msg: "{{ cobbler_distro_list_fail_msg }}"
+        success_msg: "{{ cobbler_distro_list_success_msg }}"
+      tags: TC_005,VERIFY_005
+
+    - name: Fetch cobbler profile list
+      command: docker exec cobbler cobbler profile list
+      changed_when: false
+      register: cobbler_profile_list
+      tags: TC_005,VERIFY_005
+
+    - name: Verify cobbler profile list
+      assert:
+        that:
+          - "'CentOS' in cobbler_profile_list.stdout"
+        fail_msg: "{{ cobbler_profile_list_fail_msg }}"
+        success_msg: "{{ cobbler_profile_list_success_msg }}"
+      tags: TC_005,VERIFY_005
+
+    - name: Check kickstart file
+      shell: |
+        docker exec {{ docker_container_name }} [ -f /var/lib/cobbler/kickstarts/{{ kickstart_filename }} ] && echo "File exist" || echo "File does not exist"
+      changed_when: false
+      register: kickstart_file_status
+      tags: TC_005,VERIFY_005
+
+    - name: Verify kickstart file present
+      assert:
+        that:
+          - "'File exist' in kickstart_file_status.stdout"
+        fail_msg: "{{ kickstart_file_fail_msg }}"
+        success_msg: "{{ kickstart_file_success_msg }}"
+      tags: TC_005,VERIFY_005
+
+    - name: Check crontab list
+      command: docker exec cobbler crontab -l
+      changed_when: false
+      register: crontab_list
+      tags: TC_005,VERIFY_005
+
+    - name: Verify crontab list
+      assert:
+        that:
+          - "'* * * * * ansible-playbook /root/tftp.yml' in crontab_list.stdout"
+          - "'5 * * * * ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"
+        fail_msg: "{{ crontab_list_fail_msg }}"
+        success_msg: "{{ crontab_list_success_msg }}"
+      tags: TC_005,VERIFY_005
+
+    - name: Check that the tftp, dhcpd, xinetd and cobblerd services are running
+      command: docker exec cobbler systemctl is-active {{ item }}
+      changed_when: false
+      ignore_errors: yes
+      register: cobbler_service_check
+      with_items: "{{ cobbler_services }}"
+      tags: TC_005,VERIFY_005
+
+    - name: Verify that the tftp, dhcpd, xinetd and cobblerd services are running
+      assert:
+        that:
+          - "'active' in cobbler_service_check.results[{{ item }}].stdout"
+          - "'inactive' not in cobbler_service_check.results[{{ item }}].stdout"
+          - "'unknown' not in cobbler_service_check.results[{{ item }}].stdout"
+        fail_msg: "{{ cobbler_service_check_fail_msg }}"
+        success_msg: "{{ cobbler_service_check_success_msg }}"
+      with_sequence: start=0 end=3
+      tags: TC_005,VERIFY_005
+
+# Testcase OMNIA_DIO_US_CC_TC_006
+# Execute provision role in management station where one container is already present
+- name: OMNIA_DIO_US_CC_TC_006
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete the cobbler container if it exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_006
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_006
+
+    - name: Create docker image
+      docker_image:
+        name: ubuntu
+        tag: latest
+        source: pull
+      tags: TC_006
+
+    - name: Create docker container
+      command: docker run -dit ubuntu
+      register: create_docker_container
+      changed_when: true
+      args:
+        warn: false
+      tags: TC_006
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+      tags: TC_006
+
+    - name: Check that the connection to the cobbler UI returns status 200
+      uri:
+        url: https://localhost/cobbler_web
+        status_code: 200
+        return_content: yes
+        validate_certs: no
+      tags: TC_006,VERIFY_006
+
+    - name: Fetch cobbler version in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler version
+      changed_when: false
+      register: cobbler_version
+      tags: TC_006,VERIFY_006
+
+    - name: Verify cobbler version
+      assert:
+        that:
+          - "'Cobbler' in cobbler_version.stdout"
+          - "'Error' not in cobbler_version.stdout"
+        fail_msg: "{{ cobbler_version_fail_msg }}"
+        success_msg: "{{ cobbler_version_success_msg }}"
+      tags: TC_006,VERIFY_006
+
+    - name: Run cobbler check command in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler check
+      changed_when: false
+      register: cobbler_check
+      tags: TC_006,VERIFY_006
+
+    - name: Verify cobbler check command output
+      assert:
+        that:
+          - "'The following are potential configuration items that you may want to fix' not in cobbler_check.stdout"
+          - "'Error' not in cobbler_check.stdout"
+        fail_msg: "{{ cobbler_check_fail_msg }}"
+        success_msg: "{{ cobbler_check_success_msg }}"
+      ignore_errors: yes
+      tags: TC_006,VERIFY_006
+
+    - name: Run cobbler sync command in cobbler container
+      command: docker exec {{ docker_container_name }} cobbler sync
+      changed_when: false
+      register: cobbler_sync
+      tags: TC_006,VERIFY_006
+
+    - name: Verify cobbler sync command output
+      assert:
+        that:
+          - "'TASK COMPLETE' in cobbler_sync.stdout"
+          - "'Fail' not in cobbler_sync.stdout"
+          - "'Error' not in cobbler_sync.stdout"
+        fail_msg: "{{ cobbler_sync_fail_msg }}"
+        success_msg: "{{ cobbler_sync_success_msg }}"
+      tags: TC_006,VERIFY_006
+
+    - name: Fetch cobbler distro list
+      command: docker exec {{ docker_container_name }} cobbler distro list
+      changed_when: false
+      register: cobbler_distro_list
+      tags: TC_006,VERIFY_006
+
+    - name: Verify cobbler distro list
+      assert:
+        that:
+          - "'CentOS' in cobbler_distro_list.stdout"
+        fail_msg: "{{ cobbler_distro_list_fail_msg }}"
+        success_msg: "{{ cobbler_distro_list_success_msg }}"
+      tags: TC_006,VERIFY_006
+
+    - name: Fetch cobbler profile list
+      command: docker exec cobbler cobbler profile list
+      changed_when: false
+      register: cobbler_profile_list
+      tags: TC_006,VERIFY_006
+
+    - name: Verify cobbler profile list
+      assert:
+        that:
+          - "'CentOS' in cobbler_profile_list.stdout"
+        fail_msg: "{{ cobbler_profile_list_fail_msg }}"
+        success_msg: "{{ cobbler_profile_list_success_msg }}"
+      tags: TC_006,VERIFY_006
+
+    - name: Check kickstart file
+      shell: |
+        docker exec {{ docker_container_name }} [ -f /var/lib/cobbler/kickstarts/{{ kickstart_filename }} ] && echo "File exist" || echo "File does not exist"
+      changed_when: false
+      register: kickstart_file_status
+      tags: TC_006,VERIFY_006
+
+    - name: Verify kickstart file present
+      assert:
+        that:
+          - "'File exist' in kickstart_file_status.stdout"
+        fail_msg: "{{ kickstart_file_fail_msg }}"
+        success_msg: "{{ kickstart_file_success_msg }}"
+      tags: TC_006,VERIFY_006
+
+    - name: Check crontab list
+      command: docker exec cobbler crontab -l
+      changed_when: false
+      register: crontab_list
+      tags: TC_006,VERIFY_006
+
+    - name: Verify crontab list
+      assert:
+        that:
+          - "'* * * * * ansible-playbook /root/tftp.yml' in crontab_list.stdout"
+          - "'5 * * * * ansible-playbook /root/inventory_creation.yml' in crontab_list.stdout"
+        fail_msg: "{{ crontab_list_fail_msg }}"
+        success_msg: "{{ crontab_list_success_msg }}"
+      tags: TC_006,VERIFY_006
+
+    - name: Check that the tftp, dhcpd, xinetd and cobblerd services are running
+      command: docker exec cobbler systemctl is-active {{ item }}
+      changed_when: false
+      ignore_errors: yes
+      register: cobbler_service_check
+      with_items: "{{ cobbler_services }}"
+      tags: TC_006,VERIFY_006
+
+    - name: Verify that the tftp, dhcpd, xinetd and cobblerd services are running
+      assert:
+        that:
+          - "'active' in cobbler_service_check.results[{{ item }}].stdout"
+          - "'inactive' not in cobbler_service_check.results[{{ item }}].stdout"
+          - "'unknown' not in cobbler_service_check.results[{{ item }}].stdout"
+        fail_msg: "{{ cobbler_service_check_fail_msg }}"
+        success_msg: "{{ cobbler_service_check_success_msg }}"
+      with_sequence: start=0 end=3
+      tags: TC_006,VERIFY_006
+
+    - name: Delete the ubuntu container
+      docker_container:
+        name: "{{ create_docker_container.stdout }}"
+        state: absent
+      tags: TC_006
+
+    - name: Delete the ubuntu image
+      docker_image:
+        name: ubuntu
+        state: absent
+      tags: TC_006
+
+# Testcase OMNIA_DIO_US_CC_TC_007
+# Execute provision role in management station and reboot management station
+- name: OMNIA_DIO_US_CC_TC_007
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Check last uptime of the server
+      command: uptime -s
+      register: uptime_status
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_007
+
+    - name: Check current date
+      command: date +"%Y-%m-%d %H"
+      register: current_time
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_007
+
+    - name: Delete the cobbler container if it exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      when: current_time.stdout not in uptime_status.stdout
+      tags: TC_007
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      when: current_time.stdout not in uptime_status.stdout
+      tags: TC_007
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+      when: current_time.stdout not in uptime_status.stdout
+      tags: TC_007
+
+    - name: Reboot localhost
+      command: reboot
+      when: current_time.stdout not in uptime_status.stdout
+      tags: TC_007
+
+    - name: Inspect cobbler container
+      docker_container_info:
+        name: "{{ docker_container_name }}"
+      register: cobbler_cnt_status
+      tags: TC_007,VERIFY_007
+
+    - name: Verify cobbler container is running after reboot
+      assert:
+        that: "'running' in cobbler_cnt_status.container.State.Status"
+        fail_msg: "{{ cobbler_reboot_fail_msg }}"
+        success_msg: "{{ cobbler_reboot_success_msg }}"
+      tags: TC_007,VERIFY_007
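
The service assertions in these test cases index `cobbler_service_check.results` by position via `with_sequence`; an equivalent and more idiomatic variant (illustrative, not part of the test) loops over the registered results directly:

- name: Verify cobbler services are active (illustrative variant)
  assert:
    that:
      - "'active' in item.stdout"
      - "'inactive' not in item.stdout"
    fail_msg: "{{ cobbler_service_check_fail_msg }}"
    success_msg: "{{ cobbler_service_check_success_msg }}"
  loop: "{{ cobbler_service_check.results }}"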

+ 183 - 0
appliance/test/test_provision_cdip.yml

@@ -0,0 +1,183 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Testcase OMNIA_DIO_US_CDIP_TC_001
+# Execute provision role in management station with CentOS 7 OS installed
+- name: OMNIA_DIO_US_CDIP_TC_001
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete the cobbler container if it exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_001
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_001
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+            tasks_from: "{{ item }}"
+          with_items:
+           - "{{ cobbler_image_files }}"
+      tags: TC_001
+
+    - name: Inspect cobbler docker image
+      docker_image_info:
+        name: "{{ docker_image_name }}"
+      register: cobbler_image_status
+      tags: TC_001,VERIFY_001
+
+    - name: Validate cobbler docker image
+      assert:
+        that:
+          - cobbler_image_status.images
+        fail_msg: "{{ cobbler_img_fail_msg }}"
+        success_msg: "{{ cobbler_img_success_msg }}"
+      tags: TC_001,VERIFY_001
+
+    - name: Inspect cobbler container
+      docker_container_info:
+        name: "{{ docker_container_name }}"
+      register: cobbler_cnt_status
+      tags: TC_001,VERIFY_001
+
+    - name: Validate cobbler docker container
+      assert:
+        that:
+          - cobbler_cnt_status.exists
+        fail_msg: "{{ cobbler_cnt_fail_msg }}"
+        success_msg: "{{ cobbler_cnt_success_msg }}"
+      tags: TC_001,VERIFY_001
+
+# Testcase OMNIA_DIO_US_CDIP_TC_002
+# Execute provision role in management station where the cobbler container and image are already created
+- name: OMNIA_DIO_US_CDIP_TC_002
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+      tags: TC_002
+
+    - name: Inspect cobbler docker image
+      docker_image_info:
+        name: "{{ docker_image_name }}"
+      register: cobbler_image_status
+      tags: TC_002,VERIFY_002
+
+    - name: Validate cobbler docker image
+      assert:
+        that:
+          - cobbler_image_status.images
+        fail_msg: "{{ cobbler_img_fail_msg }}"
+        success_msg: "{{ cobbler_img_success_msg }}"
+      tags: TC_002,VERIFY_002
+
+    - name: Inspect cobbler container
+      docker_container_info:
+        name: "{{ docker_container_name }}"
+      register: cobbler_cnt_status
+      tags: TC_002,VERIFY_002
+
+    - name: Validate cobbler docker container
+      assert:
+        that:
+          - cobbler_cnt_status.exists
+        fail_msg: "{{ cobbler_cnt_fail_msg }}"
+        success_msg: "{{ cobbler_cnt_success_msg }}"
+      tags: TC_002,VERIFY_002
+
+# Testcase OMNIA_DIO_US_CDIP_TC_003
+# Execute provision role in management station where the docker service is not running
+- name: OMNIA_DIO_US_CDIP_TC_003
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete the cobbler container if it exists
+      docker_container:
+        name: "{{ docker_container_name }}"
+        state: absent
+      tags: TC_003
+
+    - name: Delete docker image if exists
+      docker_image:
+        name: "{{ docker_image_name }}"
+        tag: "{{ docker_image_tag }}"
+        state: absent
+      tags: TC_003
+
+    - name: Stop docker service
+      service:
+        name: docker
+        state: stopped
+      tags: TC_003
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Call provision role
+          include_role:
+            name: ../roles/provision
+
+        - name: Docker service stopped usecase success message
+          debug:
+            msg: "{{ docker_check_success_msg }}"
+
+      rescue:
+        - name: Docker service stopped usecase fail message
+          fail:
+            msg: "{{ docker_check_fail_msg }}"
+
+      always:
+        - name: Start docker service
+          service:
+            name: docker
+            state: started
+      tags: TC_003

+ 294 - 0
appliance/test/test_provision_ndod.yml

@@ -0,0 +1,294 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_DIO_US_NDOD_TC_009
+# Execute provision role in management station and PXE boot one compute node
+- name: OMNIA_DIO_US_NDOD_TC_009
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/common/vars/main.yml
+  tasks:
+    - name: Set ip address of the compute node
+      set_fact:
+        single_node_ip_address: "{{ groups[cobbler_groupname][0] }}"
+      tags: TC_009,VERIFY_009
+
+    - name: Delete inventory if exists
+      file:
+        path: inventory
+        state: absent
+      tags: TC_009,VERIFY_009
+
+    - name: Check if the input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_009,VERIFY_009
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_path }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_009,VERIFY_009
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_009,VERIFY_009
+
+    - name: Create inventory file
+      lineinfile:
+        path: inventory
+        line: "{{ single_node_ip_address }} ansible_user=root ansible_password={{ provision_password }} ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
+        create: yes
+        mode: '{{ file_permission }}'
+      tags: TC_009,VERIFY_009
+
+    - meta: refresh_inventory
+      tags: TC_009,VERIFY_009
+
+    - name: Validate authentication of username and password
+      command: ansible {{ single_node_ip_address }} -m ping -i inventory
+      register: validate_login
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_009,VERIFY_009
+
+    - name: Validate the authentication output
+      assert:
+        that:
+          - "'pong' in validate_login.stdout"
+          - "'SUCCESS' in validate_login.stdout"
+          - "'UNREACHABLE' not in validate_login.stdout"
+        fail_msg: "{{ authentication_fail_msg }}"
+        success_msg: "{{ authentication_success_msg }}"
+      tags: TC_009,VERIFY_009
+
+    - name: Check hostname
+      command: ansible {{ single_node_ip_address }} -m shell -a hostname -i inventory
+      register: validate_hostname
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_009,VERIFY_009
+
+    - name: Validate the hostname
+      assert:
+        that: "'localhost' not in validate_hostname.stdout"
+        fail_msg: "{{ hostname_fail_msg }}"
+        success_msg: "{{ hostname_success_msg }}"
+      tags: TC_009,VERIFY_009
+
+    - name: Delete inventory if exists
+      file:
+        path: inventory
+        state: absent
+      tags: TC_009,VERIFY_009
+
+# OMNIA_DIO_US_NDOD_TC_010
+# Execute provision role on the management station and PXE boot two compute nodes
+- name: OMNIA_DIO_US_NDOD_TC_010
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Delete inventory if exists
+      file:
+        path: inventory
+        state: absent
+      tags: TC_010,VERIFY_010
+
+    - name: Check whether the input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_010,VERIFY_010
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_path }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_010,VERIFY_010
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_010,VERIFY_010
+
+    - name: Create inventory file
+      lineinfile:
+        path: inventory
+        line: "[nodes]"
+        create: yes
+        mode: '{{ file_permission }}'
+      tags: TC_010,VERIFY_010
+
+    - name: Edit inventory file
+      lineinfile:
+        path: inventory
+        line: "{{ item }} ansible_user=root ansible_password={{ provision_password }} ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
+      with_items:
+        - "{{ groups[cobbler_groupname] }}"
+      tags: TC_010,VERIFY_010
+
+    - meta: refresh_inventory
+      tags: TC_010,VERIFY_010
+
+    - name: Validate that the IP addresses of the two servers are different
+      assert:
+        that: groups[cobbler_groupname][0] != groups[cobbler_groupname][1]
+        fail_msg: "{{ ip_address_fail_msg }}"
+        success_msg: "{{ ip_address_success_msg }}"
+      delegate_to: localhost
+      run_once: yes
+      tags: TC_010,VERIFY_010
+
+    - name: Check hostname of both servers
+      command: ansible nodes -m shell -a hostname -i inventory
+      register: node_hostname
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_010,VERIFY_010
+
+    - name: Validate that the hostnames of the two servers are different
+      assert:
+        that:
+          - node_hostname.stdout_lines[1] != node_hostname.stdout_lines[3]
+          - "'localhost' not in node_hostname.stdout_lines[1]"
+          - "'localhost' not in node_hostname.stdout_lines[3]"
+        fail_msg: "{{ hostname_fail_msg }}"
+        success_msg: "{{ hostname_success_msg }}"
+      delegate_to: localhost
+      run_once: yes
+      tags: TC_010,VERIFY_010
+
+    - name: Delete inventory if exists
+      file:
+        path: inventory
+        state: absent
+      delegate_to: localhost
+      run_once: yes
+      tags: TC_010,VERIFY_010
+
+# OMNIA_DIO_US_NDOD_TC_011
+# Validate whether the passwordless ssh connection is established with the compute nodes
+- name: OMNIA_DIO_US_NDOD_TC_011
+  hosts: localhost
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_provision_vars.yml
+    - ../roles/provision/vars/main.yml
+  tasks:
+    - name: Validate authentication of username and password
+      command: "ansible {{ cobbler_groupname }} -m ping -i {{ inventory_file }}"
+      register: validate_login
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_011,VERIFY_011
+
+    - name: Validate the passwordless SSH connection
+      assert:
+        that:
+          - "'pong' in validate_login.stdout"
+          - "'SUCCESS' in validate_login.stdout"
+          - "'UNREACHABLE' not in validate_login.stdout"
+        success_msg: "{{ authentication_success_msg }}"
+        fail_msg: "{{ authentication_fail_msg }}"
+      tags: TC_011,VERIFY_011
+
+# OMNIA_DIO_US_NDOD_TC_012
+# Execute provision role on the management station and reboot the compute node after OS provisioning
+- name: OMNIA_DIO_US_NDOD_TC_012
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_provision_vars.yml
+  tasks:
+    - name: Set ip address of the compute node
+      set_fact:
+        single_node_ip_address: "{{ groups[cobbler_groupname][0] }}"
+      tags: TC_012,VERIFY_012
+
+    - name: Delete inventory if exists
+      file:
+        path: inventory
+        state: absent
+      tags: TC_012,VERIFY_012
+
+    - name: Check whether the input config file is encrypted
+      command: cat {{ test_input_config_filename }}
+      changed_when: false
+      register: config_content
+      tags: TC_012,VERIFY_012
+
+    - name: Decrypt input_config.yml
+      command: ansible-vault decrypt {{ test_input_config_filename }} --vault-password-file {{ vault_path }}
+      changed_when: false
+      when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+      tags: TC_012,VERIFY_012
+
+    - name: Include variable file input_config.yml
+      include_vars: "{{ test_input_config_filename }}"
+      tags: TC_012,VERIFY_012
+
+    - name: Create inventory file
+      lineinfile:
+        path: inventory
+        line: "[nodes]"
+        create: yes
+        mode: '{{ file_permission }}'
+      tags: TC_012,VERIFY_012
+
+    - name: Edit inventory file
+      lineinfile:
+        path: inventory
+        line: "{{ single_node_ip_address }} ansible_user=root ansible_password={{ provision_password }} ansible_ssh_common_args='-o StrictHostKeyChecking=no'"
+      tags: TC_012,VERIFY_012
+
+    - meta: refresh_inventory
+      tags: TC_012,VERIFY_012
+
+    - name: Reboot servers
+      command: ansible nodes -m command -a reboot -i inventory
+      ignore_errors: yes
+      changed_when: true
+      tags: TC_012,VERIFY_012
+
+    - name: Wait for 10 minutes
+      pause:
+        minutes: 10
+      tags: TC_012,VERIFY_012
+
+    - name: Check ip address of servers
+      command: ansible nodes -m command -a 'ip a' -i inventory
+      ignore_errors: yes
+      changed_when: false
+      register: ip_address_after_reboot
+      tags: TC_012,VERIFY_012
+
+    - name: Validate that the IP address is the same after reboot
+      assert:
+        that: "'{{ single_node_ip_address }}' in ip_address_after_reboot.stdout"
+        fail_msg: "{{ ip_address_fail_msg }}"
+        success_msg: "{{ ip_address_success_msg }}"
+      tags: TC_012,VERIFY_012

+ 51 - 0
appliance/test/test_vars/test_common_vars.yml

@@ -0,0 +1,51 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# vars file for test_common.yml file
+centos_version: '7.8'
+test_input_config_filename: "appliance_config_test.yml"
+empty_input_config_filename: "appliance_config_empty.yml"
+new_input_config_filename: "appliance_config_new.yml"
+password_config_file: "password_config"
+min_length_password: "testpass"
+max_length_password: "helloworld123helloworld12hello"
+long_password: "helloworld123hellowordl12hello3"
+white_space_password: "hello world 123"
+special_character_password1: "hello-world/"
+special_character_password2: "hello@$%!world"
+valid_dhcp_start_range: "172.17.0.10"
+valid_dhcp_end_range: "172.17.0.200"
+invalid_dhcp_ip: "1720.1700.1000.1000"
+wrong_dhcp_ip: "d6:dh1:dsj:10"
+valid_iso_path: "/root/CentOS-7-x86_64-Minimal-2009.iso"
+wrong_iso_path: "/root/testfile"
+valid_mapping_file_path: "test_mapping_file"
+
+docker_volume_success_msg: "Docker volume omnia-storage exists"
+docker_volume_fail_msg: "Docker volume omnia-storage does not exist"
+input_config_success_msg: "Input config file is encrypted using ansible-vault successfully"
+input_config_fail_msg: "Input config file failed to encrypt using ansible-vault"
+os_check_success_msg: "OS check passed"
+os_check_fail_msg: "OS check failed"
+internet_check_success_msg: "Internet connectivity check passed"
+internet_check_fail_msg: "Internet connectivity check failed"
+different_user_check_success_msg: "Different user execution check passed"
+different_user_check_fail_msg: "Different user execution check failed"
+selinux_check_success_msg: "selinux check passed"
+selinux_check_fail_msg: "selinux check failed"
+input_config_check_success_msg: "appliance_config.yml validation passed"
+input_config_check_fail_msg: "appliance_config.yml validation failed"
+install_package_success_msg: "Installation of package is successful"
+install_package_fail_msg: "Installation of package failed"

+ 85 - 0
appliance/test/test_vars/test_provision_vars.yml

@@ -0,0 +1,85 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Usage: test_provision_cdip.yml
+first_nic: "em1"
+nic1_ip_address: 172.17.0.1
+validate_password_success_msg: "Password validation successful"
+validate_password_fail_msg: "Password validation failed"
+cobbler_img_fail_msg: "Docker image cobbler does not exist"
+cobbler_img_success_msg: "Docker image cobbler exists"
+cobbler_cnt_fail_msg: "Docker container cobbler does not exist"
+cobbler_cnt_success_msg: "Docker container cobbler exists"
+nic_check_fail_msg: "NIC-1 ip address validation failed"
+nic_check_success_msg: "NIC-1 ip address validation successful"
+cobbler_image_files:
+ - check_prerequisites
+ - mount_iso
+ - firewall_settings
+ - provision_password
+ - dhcp_configure
+ - cobbler_image
+password_config_file: "password_config"
+test_input_config_filename: "appliance_config_test.yml"
+
+# Usage: test_provision_cc.yml
+docker_check_success_msg: "Docker service stopped usecase validation successful"
+docker_check_fail_msg: "Docker service stopped usecase validation failed"
+docker_ip_fail_msg: "Docker IP validation failed"
+docker_ip_success_msg: "Docker IP validation successful"
+cobbler_version_fail_msg: "Cobbler version validation failed"
+cobbler_version_success_msg: "Cobbler version validation successful"
+cobbler_check_fail_msg: "Cobbler check validation failed"
+cobbler_check_success_msg: "Cobbler check validation successful"
+cobbler_sync_fail_msg: "Cobbler sync validation failed"
+cobbler_sync_success_msg: "Cobbler sync validation successful"
+cobbler_distro_list_fail_msg: "Cobbler distro list validation failed"
+cobbler_distro_list_success_msg: "Cobbler distro list validation successful"
+cobbler_profile_list_fail_msg: "Cobbler profile list validation failed"
+cobbler_profile_list_success_msg: "Cobbler profile list validation successful"
+kickstart_file_fail_msg: "Kickstart file validation failed"
+kickstart_file_success_msg: "Kickstart file validation successful"
+cobbler_reboot_fail_msg: "Cobbler container failed to start after reboot"
+cobbler_reboot_success_msg: "Cobbler container started successfully after reboot"
+crontab_list_fail_msg: "Crontab list validation failed"
+crontab_list_success_msg: "Crontab list validation successful"
+iso_check_fail_msg: "CentOS ISO file check validation failed"
+iso_check_success_msg: "CentOS ISO file check validation successful"
+cobbler_service_check_fail_msg: "cobbler service validation failed"
+cobbler_service_check_success_msg: "cobbler service validation successful"
+kickstart_filename: "centos7.ks"
+iso_file_path: "../roles/provision/files"
+temp_iso_name: "temp_centos.iso"
+cobbler_services:
+ - tftp
+ - dhcpd
+ - cobblerd
+ - xinetd
+
+# Usage: test_provision_cdip.yml, test_provision_cc.yml, test_provision_ndod.yml
+docker_container_name: "cobbler"
+boundary_password: "testpass"
+
+# Usage: test_provision_ndod.yml
+hostname_fail_msg: "Hostname validation failed"
+hostname_success_msg: "Hostname validation successful"
+authentication_fail_msg: "Server authentication validation failed"
+authentication_success_msg: "Server authentication validation successful"
+ip_address_fail_msg: "IP address validation failed"
+ip_address_success_msg: "IP address validation successful"
+cobbler_groupname: "all"
+inventory_file: "provisioned_hosts.yml"
+file_permission: "0644"
+vault_path: ../roles/common/files/.vault_key

+ 35 - 0
appliance/test/test_vars/test_web_ui_vars.yml

@@ -0,0 +1,35 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Usage: test_web_ui.yml
+return_status: 200
+fail_return_status: -1
+awx_listening_port: 8081
+time: 1
+actual_containers: 4
+package: "docker-ce"
+awx_exists_msg: "Test case passed: AWX instance is already running on your system"
+awx_not_exists_msg: "Test case failed: AWX does not exist"
+validate_password_success_msg: "Test case passed: Password validation successful"
+validate_password_fail_msg: "Test case failed: Password validation failed"
+resource_exists_success_msg: "Success: Requested resource(s) exist"
+resource_exists_fail_msg: "Failure: Requested resource(s) do not exist"
+compute_group_name: "compute"
+manager_group_name: "manager"
+tower_cli_package_name: "ansible-tower-cli"
+docker_container_name: "awx_web"
+container_up_status_success_msg: "Container is running successfully after the reboot"
+container_up_status_fail_msg: "Container is not running after the reboot"
+test_input_config_filename: appliance_config_test.yml

+ 378 - 0
appliance/test/test_web_ui.yml

@@ -0,0 +1,378 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+# Testcase OMNIA_CRM_US_AWXD_TC_001
+# Test case to verify the prerequisites are installed and execute the AWX deployment
+- name: OMNIA_CRM_US_AWXD_TC_001
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - ../roles/web_ui/vars/main.yml
+    - test_vars/test_web_ui_vars.yml
+  tasks:
+    - name: Get the docker package facts
+      package_facts:
+        manager: auto
+      tags: TC_001
+
+    - name: Check if docker-ce is already installed
+      debug:
+        var: ansible_facts.packages[package]
+      tags: TC_001
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Calling the web_ui role to be tested
+          include_role:
+            name: ../roles/web_ui
+      tags: TC_001
+
+    - name: Check that you can connect to github repo and it returns a status 200
+      uri:
+        url: "{{ awx_git_repo }}"
+        status_code: "{{ return_status }}"
+        return_content: true
+      tags: TC_001
+
+    - name: Check that you can connect to AWX UI and it returns a status 200
+      uri:
+        url: "{{ awx_ip }}"
+        status_code: "{{ return_status }}"
+        return_content: true
+      tags: TC_001
+
+    - name: Verify that awx-server is listening on port 8081
+      wait_for:
+        port: "{{ awx_listening_port }}"
+        timeout: "{{ time }}"
+      tags: TC_001
+
+    - name: Get the containers count
+      shell: |
+        set -o pipefail
+        docker ps -a | grep awx | wc -l
+      register: containers_count
+      changed_when: False
+      tags: TC_001
+
+    - name: Validate the containers count
+      assert:
+        that: containers_count.stdout | int >= actual_containers
+        success_msg: "{{ awx_exists_msg }}"
+        fail_msg: "{{ awx_not_exists_msg }}"
+      tags: TC_001
+
+# Testcase OMNIA_CRM_US_AWXD_TC_002
+# Test case to verify regression testing
+- name: OMNIA_CRM_US_AWXD_TC_002
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - ../roles/web_ui/vars/main.yml
+    - test_vars/test_web_ui_vars.yml
+  tasks:
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Calling the web_ui role to be tested
+          include_role:
+            name: ../roles/web_ui
+      tags: TC_002
+
+    - name: Check that you can connect to github repo and it returns a status 200
+      uri:
+        url: "{{ awx_git_repo }}"
+        status_code: "{{ return_status }}"
+        return_content: true
+      tags: TC_002
+
+    - name: Check that you can connect to AWX UI and it returns a status 200
+      uri:
+        url: "{{ awx_ip }}"
+        status_code: "{{ return_status }}"
+        return_content: true
+      tags: TC_002
+
+    - name: Verify that awx-server is listening on port 8081
+      wait_for:
+        port: "{{ awx_listening_port }}"
+        timeout: "{{ time }}"
+      tags: TC_002
+
+    - name: Get the containers count
+      shell: |
+        set -o pipefail
+        docker ps -a | grep awx | wc -l
+      register: containers_count
+      changed_when: False
+      tags: TC_002
+
+    - name: Validate the containers count
+      assert:
+        that: containers_count.stdout | int >= actual_containers
+        success_msg: "{{ awx_exists_msg }}"
+        fail_msg: "{{ awx_not_exists_msg }}"
+      tags: TC_002
+
+# Testcase OMNIA_CRM_US_AWXD_TC_003
+# Test case to validate the AWX configuration
+- name: OMNIA_CRM_US_AWXD_TC_003
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - ../roles/web_ui/vars/main.yml
+    - ../roles/common/vars/main.yml
+    - test_vars/test_web_ui_vars.yml
+  tasks:
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Calling the web_ui role to be tested
+          include_role:
+            name: ../roles/web_ui
+      tags: TC_003
+
+    - name: Get the package facts
+      package_facts:
+        manager: auto
+      tags: TC_003
+
+    - name: Check if ansible-tower-cli is already installed
+      assert:
+        that: "'{{ tower_cli_package_name }}' in ansible_facts.packages"
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+    - name: Get the existing organizations
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        organizations list -f human
+      register: organizations_array
+      changed_when: False
+      tags: TC_003
+
+    - name: Check for organization
+      assert:
+        that: organization_name in organizations_array.stdout
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+    - name: Get the existing projects
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        projects list -f human
+      changed_when: False
+      register: projects_array
+      tags: TC_003
+
+    - name: Check for project
+      assert:
+        that: project_name in projects_array.stdout
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+    - name: Get the existing inventories
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        inventory list -f human
+      changed_when: False
+      register: inventory_array
+      tags: TC_003
+
+    - name: Check for inventories
+      assert:
+        that: omnia_inventory_name in inventory_array.stdout
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+    - name: Get the existing groups if omnia-inventory exists
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        groups list --inventory "{{ omnia_inventory_name }}" -f human
+      changed_when: False
+      register: groups_array
+      when: omnia_inventory_name in inventory_array.stdout
+      tags: TC_003
+
+    - name: Check for manager and compute groups
+      assert:
+        that:
+          - manager_group_name in groups_array.stdout
+          - compute_group_name in groups_array.stdout
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+    - name: Get the existing credentials
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        credentials list -f human
+      changed_when: False
+      register: credentials_array
+      tags: TC_003
+
+    - name: Check for "{{ credential_name }}"
+      assert:
+        that: credential_name in credentials_array.stdout
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+    - name: Get the existing job templates
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        job_templates list -f human
+      changed_when: False
+      register: templates_array
+      tags: TC_003
+
+    - name: Check for templates
+      assert:
+        that:
+          - omnia_template_name in templates_array.stdout
+          - inventory_template_name in templates_array.stdout
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+    - name: Get the existing schedules for job templates
+      command: >-
+        awx --conf.host "{{ awx_ip }}" --conf.username "{{ awx_user }}" --conf.password "{{ admin_password }}"
+        schedules list -f human
+      changed_when: False
+      register: schedules_array
+      tags: TC_003
+
+    - name: Check for schedules to job template
+      assert:
+        that: schedule_name in schedules_array.stdout
+        success_msg: "{{ resource_exists_success_msg }}"
+        fail_msg: "{{ resource_exists_fail_msg }}"
+      tags: TC_003
+
+# Testcase OMNIA_CRM_US_AWXD_TC_004
+# Execute common role in management station without internet connectivity
+- name: OMNIA_CRM_US_AWXD_TC_004
+  hosts: localhost
+  connection: local
+  gather_subset:
+    - 'min'
+  vars_files:
+    - test_vars/test_common_vars.yml
+    - ../roles/common/vars/main.yml
+  tasks:
+    - name: Bring down internet connectivity
+      lineinfile:
+        path: /etc/hosts
+        line: "172.16.0.5 github.com"
+        state: present
+        backup: yes
+      tags: TC_004
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Calling the web_ui role to be tested
+          include_role:
+            name: ../roles/web_ui
+
+      rescue:
+        - name: Validate internet connectivity failure message
+          assert:
+            that: internet_status in internet_value.msg
+            success_msg: "{{ internet_check_success_msg }}"
+            fail_msg: "{{ internet_check_fail_msg }}"
+      tags: TC_004
+
+    - name: Restore internet connectivity
+      lineinfile:
+        path: /etc/hosts
+        line: "172.16.0.5 github.com"
+        state: absent
+      tags: TC_004
+
+# Testcase OMNIA_CRM_US_AWXD_TC_005
+# Execute web_ui role in management station and reboot the server
+- name: OMNIA_CRM_US_AWXD_TC_005
+  hosts: localhost
+  connection: local
+  vars_files:
+    - test_vars/test_web_ui_vars.yml
+  tasks:
+    - name: Get last uptime of the server
+      command: uptime -s
+      register: uptime_status
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_005
+
+    - name: Get current date
+      command: date +"%Y-%m-%d %H"
+      register: current_time
+      changed_when: false
+      ignore_errors: yes
+      tags: TC_005
+
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/common
+          vars:
+            input_config_filename: "{{ test_input_config_filename }}"
+
+        - name: Calling the web_ui role to be tested
+          include_role:
+            name: ../roles/web_ui
+      tags: TC_005
+
+    - name: Reboot localhost
+      command: reboot
+      when: current_time.stdout not in uptime_status.stdout
+      tags: TC_005
+
+    - name: Inspect AWX web container
+      docker_container_info:
+        name: "{{ docker_container_name }}"
+      register: awx_container_status
+      tags: TC_005
+
+    - name: Verify AWX container is running after reboot
+      assert:
+        that:
+          - "'running' in awx_container_status.container.State.Status"

+ 40 - 0
appliance/tools/passwordless_ssh.yml

@@ -0,0 +1,40 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Fetch provision_password
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  roles:
+    - fetch_password
+
+- name: Prepare the cluster with passwordless ssh from manager to compute
+  hosts: manager
+  gather_facts: false
+  pre_tasks:
+    - name: Set Fact
+      set_fact:
+        ssh_to: "{{ groups['compute'] }}"
+  roles:
+    - cluster_preperation
+
+- name: Prepare the cluster with passwordless ssh from compute to manager
+  hosts: compute
+  gather_facts: false
+  pre_tasks:
+    - name: Set Fact
+      set_fact:
+        ssh_to: "{{ groups['manager'] }}"
+  roles:
+    - cluster_preperation

+ 81 - 0
appliance/tools/provision_report.yml

@@ -0,0 +1,81 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+# This file is used to generate a report of the reachable and unreachable hosts of the hpc cluster
+# This file can be executed only if provisioned_hosts.yml is created at the path omnia/appliance/roles/inventory/files/provisioned_hosts.yml
+
+# Command to execute: ansible-playbook provision_report.yml -i ../roles/inventory/files/provisioned_hosts.yml
+
+- name: Fetch provision_password
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  roles:
+    - fetch_password
+
+- name: Find reachable hosts using ping
+  hosts: all
+  gather_facts: false
+  ignore_unreachable: true
+  ignore_errors: true
+  tasks:
+    - name: Check for reachable nodes
+      command: ping -c1 {{ inventory_hostname }}
+      delegate_to: localhost
+      register: ping_result
+      ignore_errors: yes
+      changed_when: false
+
+    - name: Refresh ssh keys
+      command: ssh-keygen -R {{ inventory_hostname }}
+      delegate_to: localhost
+      changed_when: false
+
+    - name: Group reachable hosts
+      group_by:
+        key: "reachable"
+      when: "'100% packet loss' not in ping_result.stdout"
+
+- name: Find reachable hosts using ssh
+  hosts: reachable
+  gather_facts: False
+  ignore_unreachable: true
+  remote_user: "root"
+  vars:
+    ansible_password: "{{ hostvars['127.0.0.1']['cobbler_password'] }}"
+    ansible_become_pass: "{{ hostvars['127.0.0.1']['cobbler_password'] }}"
+    ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+  tasks:
+    - name: Check ssh status
+      command: hostname
+      register: ssh_status
+      ignore_errors: yes
+
+    - name: Group unreachable ssh hosts
+      group_by:
+        key: "unreachable_ssh"
+      when: ssh_status.unreachable is defined and ssh_status.unreachable == true
+
+    - name: Group reachable ssh hosts
+      group_by:
+        key: "reachable_ssh"
+      when: (ssh_status.unreachable is defined and ssh_status.unreachable == false) or (ssh_status.rc is defined and ssh_status.rc == 0)
+
+- name: Display hosts list
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  roles:
+    - hpc_cluster_report

+ 36 - 0
appliance/tools/roles/cluster_preperation/tasks/main.yml

@@ -0,0 +1,36 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+- name: Set Facts
+  set_fact:
+    ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+
+- name: Disable host key checking
+  replace:
+    path: /etc/ssh/ssh_config
+    regexp: '#   StrictHostKeyChecking ask'
+    replace: 'StrictHostKeyChecking no'
+
+- name: Install sshpass
+  package:
+    name: sshpass
+    state: present
+
+- name: Verify and set passwordless ssh from manager to compute nodes
+  block:
+    - name: Execute on individual hosts
+      include_tasks: passwordless_ssh.yml
+      with_items: "{{ ssh_to }}"
+      loop_control:
+        pause: 5

+ 84 - 0
appliance/tools/roles/cluster_preperation/tasks/passwordless_ssh.yml

@@ -0,0 +1,84 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Initialize variables
+  set_fact:
+    ssh_status: false
+    current_host: "{{ item }}"
+
+- name: Refresh ssh-key if changed
+  command: ssh-keygen -R {{ current_host }}
+  changed_when: False
+  ignore_errors: yes
+  when: "'manager' in group_names"
+
+- name: Verify whether passwordless ssh is set on the remote host
+  command: ssh -o PasswordAuthentication=no root@"{{ current_host }}" 'hostname'
+  register: ssh_output
+  ignore_errors: yes
+  changed_when: False
+
+- name: Update ssh connection status
+  set_fact:
+    ssh_status: true
+  when: "'Permission denied' not in ssh_output.stderr"
+
+- name: Verify the public key file existence
+  stat:
+    path: "{{ rsa_id_file }}"
+  register: verify_rsa_id_file
+  when: not ssh_status
+
+- name: Generate ssh key pair
+  command: ssh-keygen -t rsa -b 4096 -f "{{ rsa_id_file }}" -q -N "{{ passphrase }}"
+  when:
+    - not ssh_status
+    - not verify_rsa_id_file.stat.exists
+
+- name: Add the key identity
+  shell: |
+    eval `ssh-agent -s`
+    ssh-add "{{ rsa_id_file }}"
+  when: not ssh_status
+
+- name: Post public key
+  block:
+    - name: Create .ssh directory
+      command: >-
+        sshpass -p "{{ hostvars['127.0.0.1']['cobbler_password'] }}"
+        ssh root@"{{ current_host }}" mkdir -p /root/.ssh
+      when: not ssh_status
+      no_log: True
+      register: register_error
+
+    - name: Copy the public key to remote host
+      shell: >-
+        set -o pipefail && cat "{{ rsa_id_file }}".pub
+        | sshpass -p "{{ hostvars['127.0.0.1']['cobbler_password'] }}"
+        ssh root@"{{ current_host }}" 'cat >> "{{ auth_key_path }}"'
+      when: not ssh_status
+      no_log: True
+      register: register_error
+
+    - name: Change permissions on the remote host
+      shell: sshpass -p "{{ hostvars['127.0.0.1']['cobbler_password'] }}" ssh root@"{{ current_host }}" 'chmod 700 .ssh; chmod 640 "{{ auth_key_path }}"'
+      when: not ssh_status
+      no_log: True
+      register: register_error
+
+  rescue:
+    - name: Passwordless ssh failed
+      fail:
+        msg: "{{ register_error.stderr | regex_replace(hostvars['127.0.0.1']['cobbler_password']) | regex_replace(auth_key_path) }}"

+ 19 - 0
appliance/tools/roles/cluster_preperation/vars/main.yml

@@ -0,0 +1,19 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+#Usage: passwordless_ssh.yml
+rsa_id_file: "/root/.ssh/id_rsa"
+passphrase: ""
+auth_key_path: "/root/.ssh/authorized_keys"

+ 44 - 0
appliance/tools/roles/fetch_password/tasks/main.yml

@@ -0,0 +1,44 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+- name: Include variables from common role
+  include_vars: "{{ role_path }}/../../../roles/common/vars/main.yml"
+  no_log: True
+
+- name: Check whether the input config file is encrypted
+  command: cat {{ role_path }}/../../../{{ input_config_filename }}
+  changed_when: false
+  register: config_content
+
+- name: Decrypt appliance_config.yml
+  command: >-
+    ansible-vault decrypt {{ role_path }}/../../../{{ input_config_filename }}
+    --vault-password-file {{ role_path }}/../../../{{ vault_filename }}
+  changed_when: false
+  when: "'$ANSIBLE_VAULT;' in config_content.stdout"
+
+- name: Include variable file appliance_config.yml
+  include_vars: "{{ role_path }}/../../../{{ input_config_filename }}"
+  no_log: true
+
+- name: Save input variables from file
+  set_fact:
+    cobbler_password: "{{ provision_password }}"
+  no_log: true
+
+- name: Encrypt input config file
+  command: >-
+    ansible-vault encrypt {{ role_path }}/../../../{{ input_config_filename }}
+    --vault-password-file {{ role_path }}/../../../{{ vault_filename }}
+  changed_when: false

+ 91 - 0
appliance/tools/roles/hpc_cluster_report/tasks/main.yml

@@ -0,0 +1,91 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Initialize reachable and unreachable host number
+  set_fact:
+    reachable_host_number: 0
+    unreachable_host_number: 0
+    unreachable_ssh_host_number: 0
+    unreachable_ping_host_number: 0
+
+- name: Set reachable host number
+  set_fact:
+    reachable_host_number: "{{ groups['reachable_ssh'] | length}}"
+  when: groups['reachable_ssh'] is defined
+
+- name: Set unreachable host number
+  set_fact:
+    unreachable_host_number: "{{ groups['ungrouped'] | length | int + groups['unreachable_ssh'] | length | int }}"
+    unreachable_ssh_host_number: "{{ groups['unreachable_ssh'] | length }}"
+    unreachable_ping_host_number: "{{ groups['ungrouped'] | length }}"
+  when: groups['unreachable_ssh'] is defined
+
+- name: Set unreachable host number
+  set_fact:
+    unreachable_host_number: "{{ groups['ungrouped'] | length }}"
+    unreachable_ping_host_number: "{{ groups['ungrouped'] | length }}"
+  when: groups['unreachable_ssh'] is not defined
+            
+- name: Create files folder
+  file:
+    path: "{{ role_path}}/files"
+    state: directory
+    
+- name: Copy dhcpd.leases from cobbler
+  command: docker cp cobbler:/var/lib/dhcpd/dhcpd.leases {{ role_path}}/files/dhcpd.leases
+  changed_when: true
+
+- name: Fetch ethernet details of ping-unreachable hosts
+  shell: sed -n '/{{ item }}/,/ethernet/p' {{ role_path}}/files/dhcpd.leases | grep "ethernet" | awk '{ print $3 }' | uniq
+  register: ethernet_detail_unreachable_ping
+  changed_when: false
+  args:
+    warn: no
+  with_items:
+    - "{{ groups['ungrouped'] }}"
+
+- name: Fetch ethernet details of ssh-unreachable hosts
+  shell: sed -n '/{{ item }}/,/ethernet/p' {{ role_path}}/files/dhcpd.leases | grep "ethernet" | awk '{ print $3 }' | uniq
+  register: ethernet_detail_unreachable_ssh
+  changed_when: false
+  args:
+    warn: no
+  when: groups['unreachable_ssh'] is defined
+  with_items:
+    - "{{ groups['unreachable_ssh'] }}"
+
+- name: Fetch ethernet details of reachable hosts
+  shell: sed -n '/{{ item }}/,/ethernet/p' {{ role_path}}/files/dhcpd.leases | grep "ethernet" | awk '{ print $3 }' | uniq
+  register: ethernet_detail_reachable
+  changed_when: false
+  args:
+    warn: no
+  when: groups['reachable_ssh'] is defined
+  with_items:
+    - "{{ groups['reachable_ssh'] }}"
+
+- name: Copy host information to file
+  template:
+    src: provision_host_report.j2
+    dest: "{{ role_path}}/files/provision_host_report.txt"
+
+- name: Read provision host report
+  command: cat {{ role_path}}/files/provision_host_report.txt
+  register: host_report
+  changed_when: false
+
+- name: Display provision host report
+  debug:
+    var: host_report.stdout_lines

+ 38 - 0
appliance/tools/roles/hpc_cluster_report/templates/provision_host_report.j2

@@ -0,0 +1,38 @@
+HPC Cluster
+-----------
+Reachable Hosts:
+{% if reachable_host_number > 0 %}
+{% for host in groups['reachable_ssh'] %}
+{% if reachable_host_number == 1 %}
+  inet={{ host }}, link/ether={{ ethernet_detail_reachable.results[0].stdout | replace(';','')}}
+{% elif reachable_host_number > 1 %}
+{% if ethernet_detail_reachable.results[loop.index|int - 1].stdout | length > 1 %}
+  inet={{ host }}, link/ether={{ ethernet_detail_reachable.results[loop.index|int - 1].stdout | replace(';','')}}
+{% else %}
+  inet={{ host }}, link/ether=Refer to mapping file provided
+{% endif %}
+{% endif %}
+{% endfor %}
+{% endif %}
+Total reachable hosts: {{ reachable_host_number }}
+
+Unreachable Hosts:
+{% if unreachable_ping_host_number > 0 %}
+{% for host in groups['ungrouped'] %}
+{% if unreachable_ping_host_number == 1 %}
+  inet={{ host }}, link/ether={{ ethernet_detail_unreachable_ping.results[0].stdout | replace(';','')}}
+{% elif unreachable_ping_host_number > 1 %}
+  inet={{ host }}, link/ether={{ ethernet_detail_unreachable_ping.results[loop.index|int - 1].stdout | replace(';','')}}
+{% endif %}
+{% endfor %}
+{% endif %}
+{% if unreachable_ssh_host_number > 0 %}
+{% for host in groups['unreachable_ssh'] %}
+{% if unreachable_ssh_host_number == 1 %}
+  inet={{ host }}, link/ether={{ ethernet_detail_unreachable_ssh.results[0].stdout | replace(';','')}}
+{% elif unreachable_ssh_host_number > 1 %}
+  inet={{ host }}, link/ether={{ ethernet_detail_unreachable_ssh.results[loop.index|int - 1].stdout | replace(';','')}}
+{% endif %}
+{% endfor %}
+{% endif %}
+Total unreachable hosts: {{ unreachable_host_number }}

+ 100 - 0
docs/FAQ.md

@@ -0,0 +1,100 @@
+# Frequently Asked Questions
+
+* TOC
+{:toc}
+
+## Why is the error "Wait for AWX UI to be up" displayed when `appliance.yaml` fails?  
+Cause: 
+1. When AWX is not accessible even after five minutes of wait time. 
+2. When __isMigrating__ or __isInstalling__ is seen in the failure message.
+	
+Resolution:  
+Wait for the AWX UI to be accessible at http://\<management-station-IP>:8081, and then run the `appliance.yml` file again, where __management-station-IP__ is the IP address of the management node.
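+
+A quick way to check whether the UI is up is to poll it from the management station; a minimal sketch (the URL placeholder follows the convention above):
+```
+curl -s -o /dev/null -w "%{http_code}\n" http://<management-station-IP>:8081
+```
+A `200` response indicates that the AWX UI is reachable and `appliance.yml` can be re-run.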
+
+## What are the next steps after the nodes in a Kubernetes cluster reboot?  
+Resolution: 
+Wait for up to 15 minutes after the Kubernetes cluster reboots. Then, verify the status of the cluster using the following commands:
+* `kubectl get nodes` on the manager node displays the correct k8s cluster status.  
+* `kubectl get pods --all-namespaces` on the manager node displays all the pods in the **Running** state.
+* `kubectl cluster-info` on the manager node displays that both the k8s master and kubeDNS are in the **Running** state.
+
+## What to do when the Kubernetes services are not in the __Running__ state?  
+Resolution:	
+1. Run `kubectl get pods --all-namespaces` to verify the pods are in the **Running** state.
+2. If the pods are not in the **Running** state, delete the pods using the command: `kubectl delete pods <name of pod>` (see the example below).
+3. Run the corresponding playbook that was used to install Kubernetes: `omnia.yml`, `jupyterhub.yml`, or `kubeflow.yml`.
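+
+For example, to recover a stuck pod and re-run the corresponding playbook (the pod name and namespace are placeholders; the inventory path matches the Omnia install instructions):
+```
+kubectl delete pods <name of pod> --namespace <namespace>
+ansible-playbook omnia.yml -i inventory
+```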
+
+## What to do when the JupyterHub or Prometheus UI is not accessible?  
+Resolution:
+Run the command `kubectl get pods --namespace default` to ensure that the **nfs-client** pod and all the Prometheus server pods are in the **Running** state. 
+
+## While configuring the Cobbler, why does the `appliance.yml` fail with an error during the Run import command?  
+Cause:
+* When the mounted .iso file is corrupt.
+	
+Resolution:
+1. Go to __var__->__log__->__cobbler__->__cobbler.log__ to view the error.
+2. If the error message is **repo verification failed** then it signifies that the .iso file is not mounted properly.
+3. Verify if the downloaded .iso file is valid and correct.
+4. Delete the Cobbler container using `docker rm -f cobbler` and rerun `appliance.yml`.
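+
+One way to verify the .iso file in step 3 is to compare its checksum against the value published for the image; the path below is an example:
+```
+sha256sum /root/CentOS-7-x86_64-Minimal-2009.iso
+```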
+
+## Why does the PXE boot fail with tftp timeout or service timeout errors?  
+Cause:
+* When RAID is configured on the server.
+* When more than two servers in the same network have Cobbler services running.  
+
+Resolution:  
+1. Create a Non-RAID or virtual disk in the server.  
+2. Check whether any system other than the management node has cobblerd running. If yes, remove the Cobbler container and image using the following commands: `docker rm -f cobbler` and `docker image rm -f cobbler`.
+
+## What to do when the Slurm services do not start automatically after the cluster reboots?  
+Resolution: 
+* Manually restart the Slurm services on the manager node by running the following commands:
+```
+systemctl restart slurmdbd
+systemctl restart slurmctld
+systemctl restart prometheus-slurm-exporter
+```
+* Run `systemctl restart slurmd` to manually restart the slurmd service on all the compute nodes.
+
+## What to do when the Slurm services fail? 
+Cause: The `slurm.conf` is not configured properly.  
+Resolution:
+1. Run the following commands:
+```
+slurmdbd -Dvvv
+slurmctld -Dvvv
+```
+2. Verify the `/var/log/slurmctld.log` file.
+
+## What to do when the error "ports are unavailable" is displayed?
+Cause: Slurm database connection fails.  
+Resolution:
+1. Run the following commands:
+```
+slurmdbd -Dvvv
+slurmctld -Dvvv
+```
+2. Verify the `/var/log/slurmctld.log` file.
+3. Verify: `netstat -antp | grep LISTEN`
+4. If any processes are in the **LISTEN** state, kill the processes holding those ports.
+5. Restart all Slurm services:
+```
+systemctl restart slurmctld   # on the manager node
+systemctl restart slurmdbd    # on the manager node
+systemctl restart slurmd      # on each compute node
+```
+		
+## What to do if Kubernetes Pods are unable to communicate with the servers when the DNS servers are not responding?  
+Cause: The Kubernetes Pod Network CIDR overlaps with the host network, which causes a DNS issue.  
+Resolution:
+1. In your Kubernetes cluster, run `kubeadm reset -f` on the nodes.
+2. In the management node, edit the `omnia_config.yml` file to change the Kubernetes Pod Network CIDR. Suggested IP range is 192.168.0.0/16 and ensure you provide an IP which is not in use in your host network.
+3. Execute `omnia.yml` and skip Slurm using `--skip-tags slurm`.
+
+## What to do if the time taken to pull the images to create the Kubeflow containers exceeds the limit and the **Apply Kubeflow configurations** task fails?  
+Cause: Unstable or slow Internet connectivity.  
+Resolution:
+1. PXE boot or format the OS on the manager and compute nodes to redeploy the Kubernetes cluster.
+2. In the `omnia_config.yml` file, change the `k8s_cni` variable value from calico to flannel.
+3. Run the Kubernetes and Kubeflow playbooks (see the example below).
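+
+For example, a sketch of step 3; the playbook paths follow the Omnia install documentation, and the skip tag assumes Slurm is not needed:
+```
+ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2" --skip-tags "slurm"
+ansible-playbook platforms/kubeflow.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2"
+```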

+ 0 - 105
docs/INSTALL.md

@@ -1,105 +0,0 @@
-## TL;DR Installation
- 
-### Kubernetes
-Install Kubernetes and all dependencies
-```
-ansible-playbook -i host_inventory_file kubernetes/kubernetes.yml
-```
-
-Initialize K8s cluster
-```
-ansible-playbook -i host_inventory_file kubernetes/kubernetes.yml --tags "init"
-```
-
-### Install Kubeflow 
-```
-ansible-playbook -i host_inventory_file kubernetes/kubeflow.yaml
-```
-
-### Slurm
-```
-ansible-playbook -i host_inventory_file slurm/slurm.yml
-```
-
-# Omnia  
-Omnia is a collection of [Ansible](https://www.ansible.com/) playbooks which perform:
-* Installation of [Slurm](https://slurm.schedmd.com/) and/or [Kubernetes](https://kubernetes.io/) on servers already provisioned with a standard [CentOS](https://www.centos.org/) image.
-* Installation of auxiliary scripts for administrator functions such as moving nodes between Slurm and Kubernetes personalities.
-
-Omnia playbooks perform several tasks:
-`common` playbook handles installation of software 
-* Add yum repositories:
-    - Kubernetes (Google)
-    - El Repo (for Nvidia drivers)
-    - EPEL (Extra Packages for Enterprise Linux)
-* Install Packages from repos:
-    - bash-completion
-    - docker
-    - gcc
-    - python-pip
-    - kubelet
-    - kubeadm
-    - kubectl
-    - nfs-utils
-    - nvidia-detect
-    - yum-plugin-versionlock
-* Restart and enable system level services
-    - Docker
-    - Kubelet
-
-`computeGPU` playbook installs Nvidia drivers and nvidia-container-runtime-hook
-* Add yum repositories:
-    - Nvidia (container runtime)
-* Install Packages from repos:
-    - kmod-nvidia
-    - nvidia-container-runtime-hook
-* Restart and enable system level services
-    - Docker
-    - Kubelet
-* Configuration:
-    - Enable GPU Device Plugins (nvidia-container-runtime-hook)
-    - Modify kubeadm config to allow GPUs as schedulable resource 
-* Restart and enable system level services
-    - Docker
-    - Kubelet
-
-`master` playbook
-* Install Helm v3
-* (optional) add firewall rules for Slurm and kubernetes
-
-Everything from this point on can be called by using the `init` tag
-```
-ansible-playbook -i host_inventory_file kubernetes/kubernetes.yml --tags "init"
-```
-
-`startmaster` playbook
-* turn off swap
-*Initialize Kubernetes
-    * Head/master
-        - Start K8S pass startup token to compute/slaves
-        - Initialize software defined networking (Calico)
-
-`startworkers` playbook
-* turn off swap
-* Join k8s cluster
-
-`startservices` playbook
-* Setup K8S Dashboard
-* Add `stable` repo to helm
-* Add `jupyterhub` repo to helm
-* Update helm repos
-* Deploy NFS client Provisioner
-* Deploy Jupyterhub
-* Deploy Prometheus
-* Install MPI Operator
-
-
-### Slurm
-* Downloads and builds Slurm from source
-* Install package dependencies
-    - Python3
-    - munge
-    - MariaDB
-    - MariaDB development libraries
-* Build Slurm configuration files
-

+ 117 - 0
docs/INSTALL_OMNIA.md

@@ -0,0 +1,117 @@
+# Install Omnia using CLI
+
+The following sections provide details on installing Omnia using CLI. If you want to install the Omnia appliance and manage workloads using the Omnia appliance, see [Install the Omnia appliance](INSTALL_OMNIA_APPLIANCE.md) and [Monitor Kubernetes and Slurm](MONITOR_CLUSTERS.md) for more information.
+
+## Prerequisites
+* Ensure that all the prerequisites listed in the [Preparation to install Omnia](PREINSTALL_OMNIA.md) are met before installing Omnia.
+* If there are errors when any of the following Ansible playbook commands are run, re-run the commands. 
+* The user should have root privileges to perform installations and configurations.
+ 
+## Install Omnia using CLI
+
+1. Clone the Omnia repository:
+``` 
+git clone https://github.com/dellhpc/omnia.git 
+```
+__Note:__ After the Omnia repository is cloned, a folder named __omnia__ is created. Ensure that you do not rename this folder.
+
+2. Change the directory to __omnia__: `cd omnia`
+
+3. An inventory file must be created in the __omnia__ folder. Add the compute node IPs under the **[compute]** group and the manager node IP under the **[manager]** group, as shown in the example below. See the INVENTORY template file under the `omnia/docs` folder.
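+
+For example, a minimal inventory file (the IP addresses are illustrative):
+```
+[manager]
+172.17.0.100
+
+[compute]
+172.17.0.101
+172.17.0.102
+```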
+
+4. To install Omnia:
+```
+ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2" 
+```
+
+5. By default, no skip tags are selected, and both Kubernetes and Slurm will be deployed.
+
+To skip the installation of Kubernetes, enter:  
+`ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2"  --skip-tags "kubernetes"` 
+
+To skip the installation of Slurm, enter:  
+`ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2"  --skip-tags "slurm"`  
+
+To skip the NFS client setup, enter the following command to skip the k8s_nfs_client_setup role of Kubernetes:  
+`ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2"  --skip-tags "nfs_client"`
+
+6. To provide passwords for the MariaDB database (for Slurm accounting), the Kubernetes Pod Network CIDR, and the Kubernetes CNI, edit the `omnia_config.yml` file.  
+__Note:__ 
+* Supported values for Kubernetes CNI are calico and flannel. The default value of CNI considered by Omnia is calico. 
+* The default value of Kubernetes Pod Network CIDR is 10.244.0.0/16. If 10.244.0.0/16 is already in use within your network, select a different Pod Network CIDR. For more information, see __https://docs.projectcalico.org/getting-started/kubernetes/quickstart__.
+
+To view the passwords set in `omnia_config.yml` at a later time:  
+`ansible-vault view omnia_config.yml --vault-password-file .omnia_vault_key`
+
+Omnia considers `slurm` as the default username for MariaDB.  
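+
+The relevant entries of `omnia_config.yml` look roughly like the following sketch; the variable names shown are illustrative, so confirm them against the file shipped with your release:
+```
+# Password for the MariaDB database used by Slurm accounting
+mariadb_password: ""
+# Kubernetes CNI: calico (default) or flannel
+k8s_cni: "calico"
+# Kubernetes Pod Network CIDR (default 10.244.0.0/16)
+k8s_pod_network_cidr: "10.244.0.0/16"
+```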
+
+## Kubernetes roles
+
+The following __kubernetes__ roles are provided by Omnia when the __omnia.yml__ file is run:
+- __common__ role:
+	- Install common packages on manager and compute nodes
+	- Docker is installed
+	- Deploy the ntp/chrony time synchronization service
+	- Install Nvidia drivers and software components
+- **k8s_common** role: 
+	- Required Kubernetes packages are installed
+	- Starts the docker and Kubernetes services.
+- **k8s_manager** role: 
+	- __helm__ package for Kubernetes is installed.
+- **k8s_firewalld** role: This role is used to enable the required ports to be used by Kubernetes. 
+	- For __head-node-ports__: 6443,2379-2380,10251,10250,10252
+	- For __compute-node-ports__: 10250,30000-32767
+	- For __calico-udp-ports__: 4789
+	- For __calico-tcp-ports__: 5473,179
+	- For __flannel-udp-ports__: 8285,8472
+- **k8s_nfs_server_setup** role: 
+	- A __nfs-share__ directory, `/home/k8snfs`, is created. Using this directory, compute nodes share the common files.
+- **k8s_nfs_client_setup** role
+- **k8s_start_manager** role: 
+	- Runs the __/bin/kubeadm init__ command to initialize the Kubernetes services on manager node.
+	- Initialize the Kubernetes services in the manager node and create service account for Kubernetes Dashboard
+- **k8s_start_workers** role: 
+	- The compute nodes are initialized and joined to the Kubernetes cluster with the manager node. 
+- **k8s_start_services** role
+	- Kubernetes services are deployed such as Kubernetes Dashboard, Prometheus, MetalLB and NFS client provisioner
+
+__Note:__ 
+* After Kubernetes is installed and configured, a few Kubernetes and calico/flannel related ports are opened in the manager and compute nodes. This is required for Kubernetes Pod-to-Pod and Pod-to-Service communications. Calico/flannel provides a full networking stack for Kubernetes pods.
+* If Kubernetes Pods are unable to communicate with the servers when the DNS servers are not responding, the Kubernetes Pod Network CIDR may be overlapping with the host network, which causes a DNS issue. To resolve this issue, follow the steps below:
+1. In your Kubernetes cluster, run `kubeadm reset -f` on the nodes.
+2. In the management node, edit the `omnia_config.yml` file to change the Kubernetes Pod Network CIDR. Suggested IP range is 192.168.0.0/16 and ensure you provide an IP which is not in use in your host network.
+3. Execute omnia.yml and skip slurm using --skip-tags slurm.
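+
+Taken together, the recovery sequence might look like this (a sketch; assumes the inventory file used for the original deployment):
+```
+# On every cluster node: tear down the existing Kubernetes state
+kubeadm reset -f
+
+# On the management node: update the Pod Network CIDR, then redeploy
+ansible-vault edit omnia_config.yml --vault-password-file .omnia_vault_key
+ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2" --skip-tags "slurm"
+```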
+
+## Slurm roles
+
+The following __Slurm__ roles are provided by Omnia when the __omnia.yml__ file is run:
+- **slurm_common** role:
+	- Installs the common packages on the manager and compute nodes.
+- **slurm_manager** role:
+	- Installs the packages required only on the manager node.
+	- This role also enables the ports required by Slurm.  
+	    **tcp_ports**: 6817,6818,6819  
+		**udp_ports**: 6817,6818,6819
+	- Creates and updates the Slurm configuration files based on the manager node requirements.
+- **slurm_workers** role:
+	- Installs the Slurm packages on all compute nodes as per the compute node requirements.
+- **slurm_start_services** role: 
+	- Starts the Slurm services so that the compute nodes can communicate with the manager node.
+- **slurm_exporter** role: 
+	- Slurm exporter is a package for exporting metrics collected from the Slurm resource scheduling system to Prometheus.
+	- Slurm exporter is installed on the host, like Slurm, and it installs successfully only if Slurm is installed.
+
+**Note:** If you want to install JupyterHub and Kubeflow, you must run the JupyterHub playbook first and then the Kubeflow playbook.
+
+Commands to install JupyterHub and Kubeflow:
+* `ansible-playbook platforms/jupyterhub.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2"`
+* `ansible-playbook platforms/kubeflow.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2" `
+
+__Note:__ When the Internet connectivity is unstable or slow, pulling the images to create the Kubeflow containers may take longer. If the time limit is exceeded, the **Apply Kubeflow configurations** task may fail. To resolve this issue, you must redeploy the Kubernetes cluster and reinstall Kubeflow by completing the following steps:
+* Format the OS on manager and compute nodes.
+* In the `omnia_config.yml` file, change the k8s_cni variable value from calico to flannel.
+* Run the Kubernetes and Kubeflow playbooks.
+
+## Add a new compute node to the cluster
+
+Update the INVENTORY file present in the `omnia` directory with the new node IP address under the compute group. Ensure that the nodes which are already part of the cluster remain listed in the compute group along with the new node. Then, run `omnia.yml` to add the new node to the cluster and update the configurations of the manager node. A sketch of the updated inventory follows.
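+
+For example, after appending one new node, compute-03 (a hypothetical hostname), to the inventory shown in docs/INVENTORY:
+```
+[compute]
+compute-01
+compute-02
+compute-03
+
+[manager]
+manager-01
+```
+Then re-run the deployment:
+```
+ansible-playbook omnia.yml -i inventory -e "ansible_python_interpreter=/usr/bin/python2"
+```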

+ 189 - 0
docs/INSTALL_OMNIA_APPLIANCE.md

@@ -0,0 +1,189 @@
+# Install the Omnia appliance
+
+## Prerequisites
+* Ensure that all the prerequisites listed in the [Prerequisites to install the Omnia appliance](PREINSTALL_OMNIA_APPLIANCE.md) file are met before installing the Omnia appliance.
+* After the installation of the Omnia appliance, changing the manager node is not supported. If you need to change the manager node, you must redeploy the entire cluster.  
+* You must have root privileges to perform installations and configurations using the Omnia appliance.
+* If any of the following Ansible playbook commands fail, re-run the command.
+
+## Steps to install the Omnia appliance
+
+1. On the management node, change the working directory to the directory where you want to clone the Omnia Git repository.
+2. Clone the Omnia repository:
+``` 
+git clone https://github.com/dellhpc/omnia.git 
+```
+3. Change the directory to __omnia__: `cd omnia`
+4. Edit the `omnia_config.yml` file to:
+* Provide the password for the mariaDB database (used for Slurm accounting) under `mariadb_password`, and set the Kubernetes Pod Network CIDR and the Kubernetes CNI (the CNI under `k8s_cni`).  
+__Note:__ 
+* Supported values for Kubernetes CNI are calico and flannel. The default CNI used by Omnia is calico.
+* The default value of Kubernetes Pod Network CIDR is 10.244.0.0/16. If 10.244.0.0/16 is already in use within your network, select a different Pod Network CIDR. For more information, see __https://docs.projectcalico.org/getting-started/kubernetes/quickstart__.
+
+5. Run `ansible-vault view omnia_config.yml --vault-password-file .omnia_vault_key` to view the set passwords of __omnia_config.yml__.
+6. Change the directory to __omnia__->__appliance__: `cd omnia/appliance`
+7. Edit the `appliance_config.yml` file to:  
+	a. Provide passwords for Cobbler and AWX under `provision_password` and `awx_password` respectively.  
+	__Note:__ The password must have a minimum of eight characters and a maximum of 30 characters. Do not use the following characters in the password: -, \\, "", and \'  
+	
+	b. Change the NIC for the DHCP server under `hpc_nic`, and the NIC used to connect to the Internet under `public_nic`. The default values of **hpc_nic** and **public_nic** are set to em1 and em2 respectively.  
+	
+	c. Provide the CentOS-7-x86_64-Minimal-2009 ISO file path under `iso_file_path`. This ISO file is used by Cobbler to provision the OS on the compute nodes.  
+	__Note:__ It is recommended that you do not rename the ISO image file. You **must not** change the path of this ISO image file, as the provisioning of the OS on the compute nodes may be impacted.
+	
+	d. Provide a mapping file for DHCP configuration under `mapping_file_path`. The **mapping_file.csv** template file is present under `omnia/examples`. Enter the details in the order: `MAC, Hostname, IP`. The header in the template file must not be deleted before saving the file.  
+	If you want to continue without providing a mapping file, leave the `mapping_file_path` value as blank.  
+	__Note:__ Ensure that duplicate values are not provided for MAC, Hostname, and IP in the mapping file. The Hostname should not contain the following characters: , (comma), \. (period), and - (hyphen).
+	
+	e. Provide a valid DHCP range for the HPC cluster under the variables `dhcp_start_ip_range` and `dhcp_end_ip_range`. 
+	
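+	A sketch of the fields described above (illustrative values only; adjust to your environment):
+```
+provision_password: ""                 # Cobbler password, 8-30 characters
+awx_password: ""                       # AWX password, 8-30 characters
+hpc_nic: "em1"                         # NIC connected to the HPC/DHCP network
+public_nic: "em2"                      # NIC connected to the Internet
+iso_file_path: "/root/CentOS-7-x86_64-Minimal-2009.iso"
+mapping_file_path: ""                  # optional MAC,Hostname,IP CSV
+dhcp_start_ip_range: "172.17.0.10"
+dhcp_end_ip_range: "172.17.0.100"
+```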
+8. Run `ansible-vault view appliance_config.yml --vault-password-file .vault_key` to view the set passwords of __appliance_config.yml__.
+
+Omnia considers the following usernames as default:  
+* `cobbler` for Cobbler Server
+* `admin` for AWX
+* `slurm` for MariaDB
+
+9. Run `ansible-playbook appliance.yml -e "ansible_python_interpreter=/usr/bin/python2"` to install the Omnia appliance.
+
+   
+Omnia creates a log file which is available at: `/var/log/omnia.log`.
+
+## Provision operating system on the target nodes 
+Omnia role used: *provision*  
+Ports used by Cobbler:  
+* TCP ports: 80,443,69
+* UDP ports: 69,4011
+
+To create the Cobbler image, Omnia configures the following:
+* Firewall settings.
+* The Cobbler kickstart file, which enables UEFI PXE boot.
+
+To access the Cobbler dashboard, enter `https://<IP>/cobbler_web` where `<IP>` is the Global IP address of the management node. For example, enter
+`https://100.98.24.225/cobbler_web` to access the Cobbler dashboard.
+
+__Note__: After the Cobbler Server provisions the operating system on the nodes, IP addresses and hostnames are assigned by the DHCP service.  
+* If a mapping file is not provided, the server hostname is assigned in the format **computexxx-xxx**, where "xxx-xxx" are the last two octets of the host IP address. For example, if the host IP address is 172.17.0.11, the hostname assigned by Omnia is compute0-11.  
+* If a mapping file is provided, the hostnames follow the format provided in the mapping file.  
+
+__Note__: If you want to add more nodes, append the new nodes in the existing mapping file. However, do not modify the previous nodes in the mapping file as it may impact the existing cluster.  
+
+## Install and configure Ansible AWX 
+Omnia role used: *web_ui*  
+The port used by AWX is __8081__.  
+The AWX repository is cloned from the GitHub path: https://github.com/ansible/awx.git 
+
+Omnia performs the following configurations on AWX:
+* The default organization name is set to **Dell EMC**.
+* The default project name is set to **omnia**.
+* The credentials are stored in the **omnia_credential**.
+* Two groups, namely compute and manager groups, are provided under **omnia_inventory**. You can add hosts to these groups using the AWX UI. 
+* Pre-defined templates are provided: **DeployOmnia** and **DynamicInventory**
+* **DynamicInventorySchedule** which is scheduled to run every 10 minutes updates the inventory details dynamically. 
+
+To access the AWX dashboard, enter `http://<IP>:8081` where **\<IP>** is the Global IP address of the management node. For example, enter `http://100.98.24.225:8081` to access the AWX dashboard.
+
+**Note**: The AWX configurations are automatically performed by Omnia. Dell Technologies recommends that you do not change the default configurations provided by Omnia, as the functionality may be impacted.
+
+__Note__: Although the AWX UI is accessible, hosts are shown only after a few nodes have been provisioned by Cobbler. It takes approximately 10 to 15 minutes for the host details to appear after provisioning by Cobbler. If a server is provisioned but you are unable to view the host details on the AWX UI, run the following command from the __omnia__ -> __appliance__ -> __tools__ folder to view the hosts which are reachable.
+```
+ansible-playbook -i ../roles/inventory/provisioned_hosts.yml provision_report.yml
+```
+
+## Install Kubernetes and Slurm using AWX UI
+Kubernetes and Slurm are installed by deploying the **DeployOmnia** template on the AWX dashboard.
+
+1. On the AWX dashboard, under __RESOURCES__ __->__ __Inventories__, select **omnia_inventory**.
+2. Select __GROUPS__, and then select either __compute__ or __manager__ group.
+3. Select the __HOSTS__ tab.
+4. To add the hosts provisioned by Cobbler, click **+**, and then select **Existing Host**. 
+5. Select the hosts from the list and click __SAVE__.
+6. To deploy Omnia, under __RESOURCES__ -> __Templates__, select __DeployOmnia__, and then click __LAUNCH__.
+7. By default, no skip tags are selected, and both Kubernetes and Slurm will be deployed. 
+8. To install only Kubernetes, enter `slurm` in the skip tag section and select **slurm**. 
+9. To install only Slurm, enter `kubernetes` in the skip tag section and select **kubernetes**. 
+
+__Note:__
+*	If you would like to skip the NFS client setup, enter `nfs_client` in the skip tag section to skip the **k8s_nfs_client_setup** role of Kubernetes.
+
+10. Click **NEXT**.
+11. Review the details in the **PREVIEW** window, and click **LAUNCH** to run the DeployOmnia template. 
+
+__Note:__ If you want to install __JupyterHub__ and __Kubeflow__ playbooks, you have to first install the __JupyterHub__ playbook and then install the __Kubeflow__ playbook.
+
+__Note:__ To install __JupyterHub__ and __Kubeflow__ playbooks:
+*	From AWX UI, under __RESOURCES__ -> __Templates__, select __DeployOmnia__ template.
+*	From __PLAYBOOK__ dropdown menu, select __platforms/jupyterhub.yml__ and launch the template to install JupyterHub playbook.
+*	From __PLAYBOOK__ dropdown menu, select __platforms/kubeflow.yml__ and launch the template to install Kubeflow playbook.
+
+__Note:__ When the Internet connectivity is unstable or slow, it may take more time to pull the images to create the Kubeflow containers. If the time limit is exceeded, the **Apply Kubeflow configurations** task may fail. To resolve this issue, you must redeploy Kubernetes cluster and reinstall Kubeflow by completing the following steps:
+* Complete the PXE booting of the manager and compute nodes.
+* In the `omnia_config.yml` file, change the k8s_cni variable value from calico to flannel.
+* Run the Kubernetes and Kubeflow playbooks.
+
+The DeployOmnia template may not run successfully if:
+- The Manager group contains more than one host.
+- The Compute group does not contain a host. Ensure that the Compute group is assigned at least one host node.
+- Both the kubernetes and slurm tags are selected under Skip Tags.
+
+After the **DeployOmnia** template is run from the AWX UI, the **omnia.yml** file installs Kubernetes and Slurm, or either of them as selected in the template, on the management node. Additionally, appropriate roles are assigned to the compute and manager groups.
+
+## Kubernetes roles
+
+The following __kubernetes__ roles are provided by Omnia when the __omnia.yml__ file is run:
+- __common__ role:
+	- Installs common packages on the manager and compute nodes
+	- Installs Docker
+	- Deploys NTP/chrony for time synchronization
+	- Installs Nvidia drivers and software components
+- **k8s_common** role: 
+	- Installs the required Kubernetes packages
+	- Starts the docker and Kubernetes services.
+- **k8s_manager** role: 
+	- Installs the __helm__ package for Kubernetes.
+- **k8s_firewalld** role: This role enables the ports required by Kubernetes. 
+	- __head-node-ports__: 6443,2379-2380,10251,10250,10252
+	- __compute-node-ports__: 10250,30000-32767
+	- __calico-udp-ports__: 4789
+	- __calico-tcp-ports__: 5473,179
+	- __flannel-udp-ports__: 8285,8472
+- **k8s_nfs_server_setup** role: 
+	- Creates an __nfs-share__ directory, `/home/k8snfs`, through which the compute nodes share common files.
+- **k8s_nfs_client_setup** role:
+	- Mounts the NFS share on the compute nodes.
+- **k8s_start_manager** role: 
+	- Runs the __/bin/kubeadm init__ command to initialize the Kubernetes services on the manager node.
+	- Creates a service account for the Kubernetes Dashboard.
+- **k8s_start_workers** role: 
+	- Initializes the compute nodes and joins them to the Kubernetes cluster with the manager node. 
+- **k8s_start_services** role:
+	- Deploys Kubernetes services such as the Kubernetes Dashboard, Prometheus, MetalLB, and the NFS client provisioner.
+
+__Note:__ 
+* After Kubernetes is installed and configured, a few Kubernetes and calico/flannel related ports are opened on the manager and compute nodes. This is required for Kubernetes Pod-to-Pod and Pod-to-Service communication. Calico/flannel provides a full networking stack for Kubernetes pods.
+* If Kubernetes Pods are unable to communicate with the servers because the DNS servers are not responding, the Kubernetes Pod Network CIDR may be overlapping with the host network, which causes the DNS issue. To resolve this issue:
+1. In your Kubernetes cluster, run `kubeadm reset -f` on all the nodes.
+2. On the management node, edit the `omnia_config.yml` file to change the Kubernetes Pod Network CIDR. The suggested IP range is 192.168.0.0/16; ensure that the range you provide is not in use on your host network.
+3. Execute omnia.yml and skip Slurm using `--skip-tags slurm`.
+ 
+## Slurm roles
+
+The following __Slurm__ roles are provided by Omnia when the __omnia.yml__ file is run:
+- **slurm_common** role:
+	- Installs the common packages on the manager and compute nodes.
+- **slurm_manager** role:
+	- Installs the packages required only on the manager node.
+	- This role also enables the ports required by Slurm.  
+	    **tcp_ports**: 6817,6818,6819  
+		**udp_ports**: 6817,6818,6819
+	- Creates and updates the Slurm configuration files based on the manager node requirements.
+- **slurm_workers** role:
+	- Installs the Slurm packages on all compute nodes as per the compute node requirements.
+- **slurm_start_services** role: 
+	- Starts the Slurm services so that the compute nodes can communicate with the manager node.
+- **slurm_exporter** role: 
+	- Slurm exporter is a package for exporting metrics collected from the Slurm resource scheduling system to Prometheus.
+	- Slurm exporter is installed on the host, like Slurm, and it installs successfully only if Slurm is installed.
+
+## Add a new compute node to the cluster
+
+If a new node is provisioned through Cobbler, the node address is automatically displayed on the AWX dashboard. The node is not assigned to any group. You can add the node to the compute group along with the existing nodes and run `omnia.yml` to add the new node to the cluster and update the configurations in the manager node.

+ 6 - 0
docs/INVENTORY

@@ -0,0 +1,6 @@
+[compute]
+compute-01
+compute-02
+
+[manager]
+manager-01

+ 93 - 0
docs/MONITOR_CLUSTERS.md

@@ -0,0 +1,93 @@
+# Monitor Kubernetes and Slurm
+Omnia provides playbooks to configure additional software components for Kubernetes such as JupyterHub and Kubeflow. For workload management (submitting, controlling, and managing jobs) of HPC, AI, and Data Analytics clusters, you can access the Kubernetes and Slurm dashboards and other supported applications. 
+
+To access any of the dashboards, log in to the manager node and open the installed web browser.
+
+If you are connecting remotely, ensure that you are using PuTTY or another X11-capable client such as MobaXterm version 8 or later, and follow these steps:
+
+1. SSH to the manager node with X11 forwarding enabled:  
+   `ssh -X root@<ip>` (where `<ip>` is the private IP of the manager node)
+2. `yum install firefox -y`
+3. `yum install xorg-x11-xauth`
+4. `export DISPLAY=:10.0`
+5. Log out and log in again.
+6. To launch Firefox from the terminal, run: 
+   `firefox&`
+
+__Note:__ Each time the PuTTY/MobaXterm session ends, you must run the __export DISPLAY=:10.0__ command again; otherwise, Firefox cannot be launched.
+
+## Set up a user account on the manager node
+1. Log in to the manager node as the root user and run `adduser <username>`.
+2. Run `passwd <username>` to set the password.
+3. Run `usermod -a -G wheel <username>` to give sudo permission.
+
+__Note:__ Kubernetes and Slurm jobs can be scheduled only for users with __sudo__ privileges.
+
+## Access Kubernetes Dashboard
+1. To verify that the __kubernetes-dashboard__ service is __running__, run `kubectl get pods --namespace kubernetes-dashboard`.
+2. To start the Kubernetes dashboard, run `kubectl proxy`.
+3. From the CLI, run `kubectl get secrets` to see the generated tokens.
+4. Copy the token with the name __prometheus-kube-state-metrics__ of the type __kubernetes.io/service-account-token__.
+5. Run `kubectl describe secret <copied token name>` (steps 3 through 6 are consolidated in the sketch after this list).
+6. Copy the encrypted token value.
+7. On a web browser (installed on the manager node), enter http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ to access the Kubernetes Dashboard.
+8. Select the authentication method as __Token__.
+9. On the Kubernetes Dashboard, paste the copied encrypted token and click __Sign in__.
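+
+A consolidated sketch of steps 3 through 6 (assumes the token name from step 4):
+```
+# Find the service-account token secret and print its token value
+TOKEN_NAME=$(kubectl get secrets | grep kube-state-metrics | awk '{print $1}')
+kubectl describe secret "$TOKEN_NAME" | grep "^token:"
+```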
+
+## Access Kubeflow Dashboard
+
+It is recommended that you use a port number between __8000-8999__; the suggested port number is __8085__.
+
+1. To view the ports which are in use, run the following command:
+   `netstat -an`
+2. Select a port number between __8000-8999__ which is not in use.
+3. To run the **Kubeflow Dashboard** on the selected port, run one of the following commands:  
+	`kubectl port-forward -n kubeflow service/centraldashboard <selected_port_number>:80`  
+	(Or)  
+	`kubectl port-forward -n istio-system svc/istio-ingressgateway <selected_port_number>:80`
+4. On a web browser installed on the manager node, go to http://localhost:<selected_port_number>/ to launch the Kubeflow Central Dashboard.  
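+
+For example, using the suggested port 8085:
+```
+kubectl port-forward -n kubeflow service/centraldashboard 8085:80
+```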
+
+For more information about the Kubeflow Central Dashboard, see https://www.kubeflow.org/docs/components/central-dash/overview/.
+
+## Access JupyterHub Dashboard
+
+1. To verify if the JupyterHub services are running, run `kubectl get pods --namespace jupyterhub`.
+2. Ensure that the pod names starting with __hub__ and __proxy__ are in __Running__ status.
+3. Run `kubectl get services --namespace jupyterhub`.
+4. Copy the **External IP** of __proxy-public__ service.
+5. On a web browser installed on the __manager node__, use the External IP address to access the JupyterHub Dashboard.
+6. Enter any __username__ and __password__ combination to enter JupyterHub. The __username__ and __password__ can be configured later from the JupyterHub dashboard.
+
+## Prometheus
+
+Prometheus is installed in two different ways:
+  * It is installed on the host when Slurm is installed without installing Kubernetes.
+  * It is installed as a Kubernetes role, if you install both Slurm and Kubernetes.
+
+If Prometheus is installed as part of the Kubernetes role, run the following commands before starting the Prometheus UI:
+1. `export POD_NAME=$(kubectl get pods --namespace default -l "app=prometheus,component=server" -o jsonpath="{.items[0].metadata.name}")`
+2. `echo $POD_NAME`
+3. `kubectl --namespace default port-forward $POD_NAME 9090`
+
+If Prometheus is installed on the host, start the Prometheus web server as follows:
+1. Navigate to the Prometheus folder. The default path is __/var/lib/prometheus-2.23.0.linux-amd64/__.
+2. Start the web server: 
+  `./prometheus`
+
+Go to http://localhost:9090 to launch the Prometheus UI in the browser.
+
+__Note:__ 
+* If Prometheus was installed through Slurm without Kubernetes, it is removed when Kubernetes is installed, because Prometheus then runs as a pod. 
+* You can use a single instance of Prometheus when both Kubernetes and Slurm are installed.
+

+ 0 - 27
docs/PREINSTALL.md

@@ -1,27 +0,0 @@
-# Pre-Installation Preparation
-
-## Assumptions
-Omnia assumes that prior to installation:
-* Systems have a base operating system (currently CentOS 7 or 8)
-* Network(s) has been cabled and nodes can reach the internet
-* SSH Keys for `root` have been installed on all nodes to allow for password-less SSH
-* Ansible is installed on either the master node or a separate deployment node
-```
-yum install ansible
-```
-
-## Example system designs
-Omnia can configure systems which use Ethernet- or Infiniband-based fabric to connect the compute servers.
-
-![Example system configuration with Ethernet fabric](images/example-system-ethernet.png)
-
-![Example system configuration with Infiniband fabric](images/example-system-infiniband.png)
-
-## Network Setup
-Omnia assumes that servers are already connected to the network and have access to the internet.
-### Network Topology
-Possible network configurations include:
-* A flat topology where all nodes are connected to a switch which includes an uplink to the internet. This requires multiple externally-facing IP addresses
-* A hierarchical topology where compute nodes are connected to a common switch, but the master node contains a second network connection which is connected to the internet. All outbound/inbound traffic would be routed through the master node. This requires setting up firewall rules for IP masquerade, see [here](https://www.server-world.info/en/note?os=CentOS_7&p=firewalld&f=2) for an example.
-### IP and Hostname Assignment
-The recommended setup is to assign IP addresses to individual servers. This can be done manually by logging onto each node, or via DHCP.

+ 28 - 0
docs/PREINSTALL_OMNIA.md

@@ -0,0 +1,28 @@
+# Preparation to install Omnia
+
+## Assumptions
+Ensure that the following prerequisites are met:
+* The manager and compute nodes must be running CentOS 7.9 2009 OS.
+* All nodes are connected to the network and have access to Internet.
+* SSH Keys for root have been installed on all nodes to allow for password-less SSH.
+* On the manager node, install Ansible and Git using the following commands:
+	* `yum install epel-release -y`
+	* `yum install ansible-2.9.18 git -y`  
+__Note:__ Ansible must be installed using __yum__. If Ansible is installed using __pip3__, re-install it using the __yum__ command again.
+
+
+## Example system designs
+Omnia can configure systems which use Ethernet or Infiniband-based fabric to connect the compute servers.
+
+![Example system configuration with Ethernet fabric](images/example-system-ethernet.png)
+
+![Example system configuration with Infiniband fabric](images/example-system-infiniband.png)
+
+## Network Setup
+Omnia assumes that servers are already connected to the network and have access to the internet.
+### Network Topology
+Possible network configurations include:
+* A flat topology where all nodes are connected to a switch which includes an uplink to the internet. This requires multiple externally-facing IP addresses.
+* A hierarchical topology where compute nodes are connected to a common switch, but the manager node contains a second network connection which is connected to the internet. All outbound/inbound traffic would be routed through the manager node. This requires setting up firewall rules for IP masquerade; see [here](https://www.server-world.info/en/note?os=CentOS_7&p=firewalld&f=2) for an example, and the sketch after this list.
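+
+A minimal sketch of such masquerade rules (assumes firewalld and that the manager node's Internet-facing NIC is in the `public` zone):
+```
+firewall-cmd --zone=public --add-masquerade --permanent
+firewall-cmd --reload
+```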
+### IP and Hostname Assignment
+The recommended setup is to assign IP addresses to individual servers. This can be done manually by logging onto each node, or via DHCP.

+ 36 - 0
docs/PREINSTALL_OMNIA_APPLIANCE.md

@@ -0,0 +1,36 @@
+# Prerequisites to install the Omnia appliance
+
+Ensure that the following prerequisites are met before installing the Omnia appliance:
+* On the management node, install Ansible and Git using the following commands:
+	* `yum install epel-release -y`
+	* `yum install ansible-2.9.18 git -y`  
+	__Note:__ Ansible must be installed using __yum__. If Ansible is installed using __pip3__, re-install it using the __yum__ command again.
+* Ensure a stable Internet connection is available on management node and target nodes. 
+* CentOS 7.9 2009 is installed on the management node.
+* To provision the bare metal servers, go to http://isoredirect.centos.org/centos/7/isos/x86_64/ and download the **CentOS-7-x86_64-Minimal-2009** ISO file.
+* For DHCP configuration, you can provide a mapping file. The provided details must be in the format: MAC, Hostname, IP. For example, `xx:xx:4B:C4:xx:44,validation01,172.17.0.81` and  `xx:xx:4B:C5:xx:52,validation02,172.17.0.82` are valid entries.  
+__Note:__ A template for mapping file is present in the `omnia/examples`, named `mapping_file.csv`. The header in the template file must not be deleted before saving the file.  
+__Note:__ Ensure that duplicate values are not provided for MAC, Hostname, and IP in the mapping file. The Hostname should not contain the following characters: , (comma), \. (period), and - (hyphen).
+* Connect one of the Ethernet cards on the management node to the HPC switch, and connect the other Ethernet card to the global network.
+* If SELinux is not disabled on the management node, disable it from `/etc/sysconfig/selinux` and restart the management node.
+* The default mode of PXE is __UEFI__, and the BIOS Legacy Mode is not supported.
+* The default boot order for the bare metal servers must be __PXE__.
+* Configuration of __RAID__ is not part of Omnia. If the bare metal servers have a __RAID__ controller installed, it is mandatory to create a **VIRTUAL DISK**.
+
+
+## Example system designs
+Omnia can configure systems which use Ethernet or Infiniband-based fabric to connect the compute servers.
+
+![Example system configuration with Ethernet fabric](images/example-system-ethernet.png)
+
+![Example system configuration with Infiniband fabric](images/example-system-infiniband.png)
+
+## Network Setup
+Omnia assumes that servers are already connected to the network and have access to the internet.
+### Network Topology
+Possible network configurations include:
+* A flat topology where all nodes are connected to a switch which includes an uplink to the internet. This requires multiple externally-facing IP addresses
+* A hierarchical topology where compute nodes are connected to a common switch, but the manager node contains a second network connection which is connected to the internet. All outbound/inbound traffic would be routed through the manager node. This requires setting up firewall rules for IP masquerade, see [here](https://www.server-world.info/en/note?os=CentOS_7&p=firewalld&f=2) for an example.
+### IP and Hostname Assignment
+The recommended setup is to assign IP addresses to individual servers. This can be done manually by logging onto each node, or via DHCP.

File diff suppressed because it is too large
+ 78 - 13
docs/README.md


+ 1 - 0
docs/_config.yml

@@ -2,3 +2,4 @@ theme: jekyll-theme-minimal
 title: Omnia
 description: Ansible playbook-based tools for deploying Slurm and Kubernetes clusters for High Performance Computing, Machine Learning, Deep Learning, and High-Performance Data Analytics
 logo: images/omnia-logo.png
+markdown: kramdown

Binary
docs/images/omnia-branch-structure.png


Binary
docs/images/omnia-overview.png


+ 1 - 1
examples/README.md

@@ -1,7 +1,7 @@
 # Examples
 
 
-The examples [K8s Submit](https://github.com/dellhpc/omnia/blob/master/examples/k8s-TensorFlow-resnet50-multinode-MPIOperator.yaml) and [SLURM submit](https://github.com/dellhpc/omnia/blob/master/examples/slurm-TensorFlow-resnet50-multinode-MPI.batch) are provide as examples for running the resnet50 benchmark with TensorFlow on 8 GPUs using 2 C4140s.
+The examples [K8s Submit](https://github.com/dellhpc/omnia/blob/devel/examples/k8s-TensorFlow-resnet50-multinode-MPIOperator.yaml) and [SLURM submit](https://github.com/dellhpc/omnia/blob/devel/examples/slurm-TensorFlow-resnet50-multinode-MPI.batch) are provided as examples for running the resnet50 benchmark with TensorFlow on 8 GPUs using 2 C4140s.
 
 ## Submitting the example
 

+ 3 - 6
kubernetes/host_inventory_file

@@ -2,19 +2,16 @@ all:
   children:
     cluster:
       children:
-        master:
+        manager:
           hosts:
             compute000:
         workers:
           children:
             compute:
               hosts:
-                compute003:
+                compute001:
             gpus:
               hosts:
                 compute002:
+                compute003:
                 compute004:
-                compute005:
-      vars:
-        single_node: false
-        master_ip: 10.0.0.100

+ 13 - 0
examples/host_inventory_file.ini

@@ -0,0 +1,13 @@
+[manager]
+friday
+
+[compute]
+compute000
+compute[002:005]
+
+[workers:children]
+compute
+
+[cluster:children]
+manager
+workers

+ 2 - 0
examples/mapping_file.csv

@@ -0,0 +1,2 @@
+MAC,Hostname,IP
+xx:yy:zz:aa:bb,server,1.2.3.4

+ 0 - 55
kubernetes/kubernetes.yml

@@ -1,55 +0,0 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-
----
-#Playbook for kubernetes cluster 
-
-#collect info from everything
-- hosts: all
-
-# Apply Common Installation and Config
-- hosts: cluster
-  gather_facts: false
-  roles:
-    - common
-
-# Apply GPU Node Config
-- hosts: gpus
-  gather_facts: false
-  roles:
-    - computeGPU
-
-# Apply Master Config
-- hosts: master
-  gather_facts: false
-  roles:
-    - master
-
-# Start K8s on master server
-- hosts: master
-  gather_facts: false
-  roles:
-    - startmaster
-
-# Start K8s worker servers
-- hosts: compute,gpus
-  gather_facts: false
-  roles:
-    - startworkers
-
-# Start K8s worker servers
-- hosts: master
-  gather_facts: false
-  roles:
-    - startservices

+ 0 - 3
kubernetes/roles/common/files/nvidia

@@ -1,3 +0,0 @@
-#!/bin/sh
-PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" exec nvidia-container-runtime-hook "$@"
-

+ 0 - 21
kubernetes/roles/common/handlers/main.yml

@@ -1,21 +0,0 @@
----
-
-#- name: Enable docker service
-  #service:
-    #name: docker
-    #enabled: yes
-#
-- name: Start and Enable docker service
-  service:
-    name: docker
-    state: restarted
-    enabled: yes
-  #tags: install
-
-- name: Start and Enable Kubernetes - kubelet
-  service:
-    name: kubelet
-    state: started
-    enabled: yes
-  #tags: install
-

+ 0 - 140
kubernetes/roles/common/tasks/main.yml

@@ -1,140 +0,0 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved. 
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-
----
-
-- name: add kubernetes repo
-  copy: src=kubernetes.repo dest=/etc/yum.repos.d/ owner=root group=root mode=644
-  tags: install
-
-# add ElRepo GPG Key
-- rpm_key:
-    state: present
-    key: https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
-  tags: install
-
-- name: add ElRepo (Nvidia kmod drivers)
-  yum:
-    name: http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
-    state: present
-  tags: install
-
-- name: update sysctl to handle incorrectly routed traffic when iptables is bypassed
-  copy: src=k8s.conf dest=/etc/sysctl.d/ owner=root group=root mode=644
-  tags: install
-
-- name: update sysctl
-  command: /sbin/sysctl --system
-  tags: install
-
-- name: Install EPEL Repository
-  yum: name=epel-release state=present
-  tags: install
-
-#likely need to add a reboot hook in here
-#- name: update kernel and all other system packages
-  #yum: name=* state=latest
-  #tags: install
-
-- name: disable swap
-  command: /sbin/swapoff -a
-  tags: install
-
-# Disable selinux
-- selinux:
-    state: disabled
-  tags: install
-
-- name: install common packages
-  yum: 
-    name:
-      - yum-plugin-versionlock
-      - gcc
-      - nfs-utils
-      - python-pip
-      - docker
-      - bash-completion
-      - kubelet-1.16.7
-      - kubeadm-1.16.7
-      - kubectl-1.16.7
-      - nvidia-detect
-    state: present
-  tags: install
-
-- name: versionlock kubernetes
-  command: yum versionlock kubelet-1.16.7 kubectl-1.16.7 kubeadm-1.16.7
-  tags: install
-  
-
-- name: install InfiniBand Support
-  yum:
-    name: "@Infiniband Support"
-    state: present
-
-- name: upgrade pip
-  command: /bin/pip install --upgrade pip
-  tags: install
-
-#- name: Enable DevicePlugins for all GPU nodes (nvidia-container-runtime-hook)
-  #copy: src=nvidia dest=/usr/libexec/oci/hooks.d/ owner=root group=root mode=755
-  #tags: install
-
-- name: Add KUBE_EXTRA_ARGS to enable GPUs
-  lineinfile:
-    path: /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
-    line: 'Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true"'
-    insertbefore: 'KUBELET_KUBECONFIG_ARGS='
-  tags: install
-
-- name: Start and Enable docker service
-  service:
-    name: docker
-    state: restarted
-    enabled: yes
-  tags: install
-
-- name: Start and Enable Kubernetes - kubelet
-  service:
-    name: kubelet
-    state: restarted
-    enabled: yes
-  tags: install
-
-- name: Start and rpcbind service
-  service:
-    name: rpcbind
-    state: restarted
-    enabled: yes
-  tags: install
-
-- name: Start and nfs-server service
-  service:
-    name: nfs-server
-    state: restarted
-    enabled: yes
-  tags: install
-
-- name: Start and nfs-lock service
-  service:
-    name: nfs-lock
-    #state: restarted
-    enabled: yes
-  tags: install
-
-- name: Start and nfs-idmap service
-  service:
-    name: nfs-idmap
-    state: restarted
-    enabled: yes
-  tags: install

+ 0 - 10
kubernetes/roles/common/vars/main.yml

@@ -1,10 +0,0 @@
----
-
-common_packages:
-  - epel-release
-  - python-pip
-  - docker
-  - bash-completion
-  - kubelet 
-  - kubeadm
-  - kubectl

+ 0 - 3
kubernetes/roles/computeGPU/files/k8s.conf

@@ -1,3 +0,0 @@
-net.bridge.bridge-nf-call-ip6tables = 1
-net.bridge.bridge-nf-call-iptables = 1
-

+ 0 - 8
kubernetes/roles/computeGPU/files/kubernetes.repo

@@ -1,8 +0,0 @@
-[kubernetes]
-name=Kubernetes
-baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
-enabled=1
-gpgcheck=1
-repo_gpgcheck=1
-gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
-

+ 0 - 3
kubernetes/roles/computeGPU/files/nvidia

@@ -1,3 +0,0 @@
-#!/bin/sh
-PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" exec nvidia-container-runtime-hook "$@"
-

+ 0 - 21
kubernetes/roles/computeGPU/handlers/main.yml

@@ -1,21 +0,0 @@
----
-
-#- name: Enable docker service
-  #service:
-    #name: docker
-    #enabled: yes
-#
-- name: Start and Enable docker service
-  service:
-    name: docker
-    state: restarted
-    enabled: yes
-  #tags: install
-
-- name: Start and Enable Kubernetes - kubelet
-  service:
-    name: kubelet
-    state: started
-    enabled: yes
-  #tags: install
-

+ 0 - 78
kubernetes/roles/computeGPU/tasks/main.yml

@@ -1,78 +0,0 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-
----
-- name: install Nvidia driver
-  yum: 
-    name: 
-      - kmod-nvidia
-      #- nvidia-x11-drv
-    state: present
-  tags: install
-
-#- name: add Nvidia container runtime support
-  #get_url:
-    #url: https://nvidia.github.io/nvidia-docker/centos7/nvidia-docker.repo
-    #dest: /etc/yum.repos.d/nvidia-docker.repo
-  #tags: install
-
-- name: add Nvidia container runtime support
-  get_url:
-    url: https://nvidia.github.io/nvidia-container-runtime/centos7/nvidia-container-runtime.repo
-    dest: /etc/yum.repos.d/nvidia-container-runtime.repo
-  tags: install, testing
-
-# disable gpg key (because Nvidia doesn't know how to make that work yet for some reason)
-- replace:
-    path: /etc/yum.repos.d/nvidia-container-runtime.repo
-    regexp: 'repo_gpgcheck=1'
-    replace: 'repo_gpgcheck=0'
-    backup: yes
-  tags: testing
-
-- name: install Nvidia-container-runtime-hook
-  yum: 
-    name: 
-      #- nvidia-detect
-      #- kmod-nvidia-410.73-1.el7_5.elrepo
-      - nvidia-container-runtime-hook
-    state: present
-  tags: install
-
-
-# This needs to be done on GPU nodes 
-#- name: Enable DevicePlugins for all GPU nodes (nvidia-container-runtime-hook)
-  #copy: src=nvidia dest=/usr/libexec/oci/hooks.d/ owner=root group=root mode=755
-  #tags: install
-
-#- name: Add KUBE_EXTRA_ARGS to enable Plugins (GPU support)  --III alreday done in common
-  #lineinfile:
-    #path: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
-    #line: 'Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true"'
-    #insertbefore: 'KUBELET_KUBECONFIG_ARGS='
-  #tags: install
-
-- name: Restart and Enable docker service
-  service:
-    name: docker
-    state: restarted
-    enabled: yes
-  tags: install
-
-- name: Restart and Enable Kubernetes - kubelet
-  service:
-    name: kubelet
-    state: restarted
-    enabled: yes
-  tags: install

+ 0 - 10
kubernetes/roles/computeGPU/vars/main.yml

@@ -1,10 +0,0 @@
----
-
-common_packages:
-  - epel-release
-  - python-pip
-  - docker
-  - bash-completion
-  - kubelet 
-  - kubeadm
-  - kubectl

+ 0 - 122
kubernetes/roles/kubeflow/tasks/main.yml

@@ -1,122 +0,0 @@
-#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-
----
-
-#Configure build and deploy kubeflow v1.0 
-
-- name: Download kfctl v1.0.2 release from the Kubeflow releases page.
-  unarchive:
-    src: https://github.com/kubeflow/kfctl/releases/download/v1.0.2/kfctl_v1.0.2-0-ga476281_linux.tar.gz
-    dest: /usr/bin/
-    remote_src: yes
-
-- name: Delete Omnia Kubeflow Directory if exists
-  file:
-    path: /root/k8s/omnia-kubeflow
-    state: absent
-
-- name: Create Kubeflow Directory
-  file:
-    path: /root/k8s/omnia-kubeflow
-    state: directory
-    recurse: yes
-
-- name: Build Kubeflow Configuration
-  shell: 
-    cmd: /usr/bin/kfctl build -V -f https://raw.githubusercontent.com/kubeflow/manifests/v1.0-branch/kfdef/kfctl_k8s_istio.v1.0.2.yaml
-    chdir: /root/k8s/omnia-kubeflow
-
-- name: Modify Cpu Limit for istio-ingressgateway-service-account 
-  replace:
-    path: /root/k8s/omnia-kubeflow/kustomize/istio-install/base/istio-noauth.yaml
-    after: 'serviceAccountName: istio-ingressgateway-service-account'
-    before: '---'
-    regexp: 'cpu: 100m'
-    replace: 'cpu: 2'
-  
-- name: Modify Mem Limit for istio-ingressgateway-service-account 
-  replace:
-    path: /root/k8s/omnia-kubeflow/kustomize/istio-install/base/istio-noauth.yaml
-    after: 'serviceAccountName: istio-ingressgateway-service-account'
-    before: '---'
-    regexp: 'memory: 128Mi'
-    replace: 'memory: 512Mi'
-
-- name: Modify Cpu Request for istio-ingressgateway-service-account 
-  replace:
-    path: /root/k8s/omnia-kubeflow/kustomize/istio-install/base/istio-noauth.yaml
-    after: 'serviceAccountName: istio-ingressgateway-service-account'
-    before: '---'
-    regexp: 'cpu: 10m'
-    replace: 'cpu: 1'
-  
-- name: Modify Mem Request for istio-ingressgateway-service-account 
-  replace:
-    path: /root/k8s/omnia-kubeflow/kustomize/istio-install/base/istio-noauth.yaml
-    after: 'serviceAccountName: istio-ingressgateway-service-account'
-    before: '---'
-    regexp: 'memory: 40Mi'
-    replace: 'memory: 256Mi'
-
-
-- name: Modify Cpu Limit for kfserving-gateway
-  replace:
-    path: /root/k8s/omnia-kubeflow/kustomize/kfserving-gateway/base/deployment.yaml
-    after: 'serviceAccountName: istio-ingressgateway-service-account'
-    before: 'env:'
-    regexp: 'cpu: 100m'
-    replace: 'cpu: 2'
-  
-- name: Modify Mem Limit for kfserving-gateway
-  replace:
-    path: /root/k8s/omnia-kubeflow/kustomize/kfserving-gateway/base/deployment.yaml
-    after: 'serviceAccountName: istio-ingressgateway-service-account'
-    before: 'env:'
-    regexp: 'memory: 128Mi'
-    replace: 'memory: 512Mi'
-
-- name: Modify Cpu Request for kfserving-gateway
-  replace:
-    path: /root/k8s/omnia-kubeflow/kustomize/kfserving-gateway/base/deployment.yaml
-    after: 'serviceAccountName: istio-ingressgateway-service-account'
-    before: 'env:'
-    regexp: 'cpu: 10m'
-    replace: 'cpu: 1'
-  
-- name: Modify Mem Request for kfserving-gateway
-  replace:
-    path: /root/k8s/omnia-kubeflow/kustomize/kfserving-gateway/base/deployment.yaml
-    after: 'serviceAccountName: istio-ingressgateway-service-account'
-    before: 'env:'
-    regexp: 'memory: 40Mi'
-    replace: 'memory: 256Mi'
-
-
-- name: Change Argo base service from NodePort to LoadBalancer
-  replace:
-    path: /root/k8s/omnia-kubeflow/kustomize/argo/base/service.yaml
-    regexp: 'NodePort'
-    replace: 'LoadBalancer'
-
-- name: Change istio-install base istio-noauth service from NodePort to LoadBalancer
-  replace:
-    path: /root/k8s/omnia-kubeflow/kustomize/istio-install/base/istio-noauth.yaml
-    regexp: 'NodePort'
-    replace: 'LoadBalancer'
-
-- name: Apply Kubeflow Configuration
-  shell: 
-    cmd: /usr/bin/kfctl apply -V -f /root/k8s/omnia-kubeflow/kfctl_k8s_istio.v1.0.2.yaml
-    chdir: /root/k8s/omnia-kubeflow

+ 0 - 3
kubernetes/roles/master/files/k8s.conf

@@ -1,3 +0,0 @@
-net.bridge.bridge-nf-call-ip6tables = 1
-net.bridge.bridge-nf-call-iptables = 1
-

+ 0 - 0
kubernetes/roles/master/files/kubernetes.repo


Some files were not shown because too many files changed in this diff