
Merge branch 'devel' into devel

Lucas A. Wilson 4 years ago
parent
commit d36f810800
69 changed files with 1638 additions and 162 deletions
  1. omnia.yml (+79 -20)
  2. roles/common/files/daemon.json (+1 -1)
  3. roles/common/files/inventory.fact (+1 -1)
  4. roles/common/handlers/main.yml (+17 -12)
  5. roles/common/tasks/main.yml (+5 -59)
  6. roles/common/tasks/ntp.yml (+25 -25)
  7. roles/common/tasks/nvidia.yml (+1 -1)
  8. roles/common/templates/chrony.conf.j2 (+1 -2)
  9. roles/common/templates/ntp.conf.j2 (+1 -3)
  10. roles/common/vars/main.yml (+7 -17)
  11. roles/k8s_common/files/k8s.conf (+0 -0)
  12. roles/k8s_common/files/kubernetes.repo (+0 -0)
  13. roles/k8s_common/handlers/main.yml (+28 -0)
  14. roles/k8s_common/tasks/main.yml (+77 -0)
  15. roles/k8s_common/vars/main.yml (+31 -0)
  16. roles/firewalld/tasks/main.yml (+2 -2)
  17. roles/firewalld/vars/main.yml (+1 -2)
  18. roles/k8s_manager/tasks/main.yml (+0 -0)
  19. roles/k8s_manager/vars/main.yml (+0 -0)
  20. roles/k8s_nfs_client_setup/tasks/main.yml (+40 -0)
  21. roles/k8s_nfs_client_setup/vars/main.yml (+20 -0)
  22. roles/k8s_nfs_server_setup/tasks/main.yml (+84 -0)
  23. roles/k8s_nfs_server_setup/vars/main.yml (+25 -0)
  24. roles/k8s_start_manager/files/create_admin_user.yaml (+0 -0)
  25. roles/k8s_start_manager/files/create_clusterRoleBinding.yaml (+0 -0)
  26. roles/startmanager/files/data-pv.yaml (+0 -0)
  27. roles/startmanager/files/data2-pv.yaml (+0 -0)
  28. roles/startmanager/files/data3-pv.yaml (+0 -0)
  29. roles/startmanager/files/data4-pv.yaml (+0 -0)
  30. roles/startmanager/files/flannel_net.sh (+0 -0)
  31. roles/startmanager/files/katib-pv.yaml (+0 -0)
  32. roles/k8s_start_manager/files/kube-flannel.yaml (+0 -0)
  33. roles/k8s_start_manager/files/kubeflow_persistent_volumes.yaml (+0 -0)
  34. roles/startmanager/files/minio-pvc.yaml (+0 -0)
  35. roles/startmanager/files/mysql-pv.yaml (+0 -0)
  36. roles/k8s_start_manager/files/nfs-class.yaml (+0 -0)
  37. roles/k8s_start_manager/files/nfs-deployment.yaml (+0 -0)
  38. roles/k8s_start_manager/files/nfs-serviceaccount.yaml (+0 -0)
  39. roles/k8s_start_manager/files/nfs_clusterrole.yaml (+0 -0)
  40. roles/k8s_start_manager/files/nfs_clusterrolebinding.yaml (+0 -0)
  41. roles/startmanager/files/notebook-pv.yaml (+0 -0)
  42. roles/startmanager/files/persistent_volumes.yaml (+0 -0)
  43. roles/k8s_start_manager/files/pvc.yaml (+0 -0)
  44. roles/startmanager/files/tiller_config.sh (+0 -0)
  45. roles/startmanager/tasks/main.yml (+2 -2)
  46. roles/startmanager/vars/main.yml (+1 -1)
  47. roles/k8s_start_services/files/metal-config.yaml (+0 -0)
  48. roles/k8s_start_services/files/metallb.yaml (+0 -0)
  49. roles/k8s_start_services/tasks/main.yml (+0 -0)
  50. roles/startservices/vars/main.yml (+2 -3)
  51. roles/k8s_start_workers/tasks/main.yml (+0 -0)
  52. roles/k8s_start_workers/vars/main.yml (+0 -0)
  53. roles/manager/files/k8s.conf (+0 -3)
  54. roles/manager/files/kubernetes.repo (+0 -8)
  55. test/test_jupyterhub.yml (+113 -0)
  56. test/test_k8s_common.yml (+155 -0)
  57. test/test_k8s_firewalld.yml (+226 -0)
  58. test/test_k8s_manager.yml (+70 -0)
  59. test/test_k8s_start_manager_workers.yml (+144 -0)
  60. test/test_k8s_start_services.yml (+97 -0)
  61. test/test_k8s_start_workers.yml (+50 -0)
  62. test/test_kubeflow.yml (+123 -0)
  63. test/test_vars/test_jupyterhub_vars.yml (+22 -0)
  64. test/test_vars/test_k8s_common_vars.yml (+34 -0)
  65. test/test_vars/test_k8s_firewalld_vars.yml (+38 -0)
  66. test/test_vars/test_k8s_manager_vars.yml (+17 -0)
  67. test/test_vars/test_k8s_start_manager_workers_vars.yml (+38 -0)
  68. test/test_vars/test_k8s_start_services_vars.yml (+38 -0)
  69. test/test_vars/test_kubeflow_vars.yml (+22 -0)

+ 79 - 20
omnia.yml

@@ -13,72 +13,124 @@
 # limitations under the License.
 ---
 
-# Omnia playbook. Will be updated later.
+- name: Validate the cluster
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  roles:
+    - cluster_validation
 
 - name: Gather facts from all the nodes
   hosts: all
 
+- name: Prepare the cluster with passwordless ssh from manager to compute
+  hosts: manager
+  gather_facts: false
+  pre_tasks:
+    - name: Set Fact
+      set_fact:
+        ssh_to: "{{ groups['compute'] }}"
+  roles:
+    - cluster_preperation
+
+- name: Prepare the cluster with passwordless ssh from compute to manager
+  hosts: compute
+  gather_facts: false
+  pre_tasks:
+    - name: Set Fact
+      set_fact:
+        ssh_to: "{{ groups['manager'] }}"
+  roles:
+    - cluster_preperation
+    
 - name: Apply common installation and config
   hosts: manager, compute
   gather_facts: false
   roles:
     - common
- 
-#- name: Apply GPU node config
-#  hosts: gpus
-#  gather_facts: false
-#  roles:
-#    - compute_gpu
+  tags: common
+
+- name: Apply common K8s installation and config
+  hosts: manager, compute
+  gather_facts: false
+  roles:
+    - k8s_common
+  tags: kubernetes
+
+- name: Apply GPU node config
+  hosts: gpus
+  gather_facts: false
+  roles:
+    - compute_gpu
 
 - name: Apply K8s manager config
   hosts: manager
   gather_facts: true
   roles:
-    - manager
+    - k8s_manager
+  tags: kubernetes
 
 - name: Apply K8s firewalld config on manager and compute nodes
   hosts: manager, compute
   gather_facts: false
   roles:
-    - firewalld
+    - k8s_firewalld
+  tags: kubernetes
+
+- name: Apply NFS server setup on manager node
+  hosts: manager
+  gather_facts: false
+  roles:
+    - k8s_nfs_server_setup
+  tags: kubernetes
+
+- name: Apply NFS client setup on compute nodes
+  hosts: compute
+  gather_facts: false
+  roles:
+    - k8s_nfs_client_setup
+  tags: kubernetes
 
 - name: Start K8s on manager server
   hosts: manager
   gather_facts: true
   roles:
-    - startmanager
+    - k8s_start_manager
+  tags: kubernetes
 
 - name: Start K8s worker servers on compute nodes
   hosts: compute
   gather_facts: false
   roles:
-    - startworkers
+    - k8s_start_workers
+  tags: kubernetes
 
- name: Start K8s services on manager nodes
   hosts: manager
   gather_facts: false
   roles:
-    - startservices
+    - k8s_start_services
+  tags: kubernetes
 
-- name: Apply SLURM manager config
-  hosts: manager
+- name: Apply common Slurm installation and config
+  hosts: manager, compute
   gather_facts: false
   roles:
-    - slurm_manager
+    - slurm_common
   tags: slurm
 
-- name: Apply common Slurm installation and config
-  hosts: manager, compute
+- name: Apply Slurm manager config
+  hosts: manager
   gather_facts: false
   roles:
-    - slurm_common
+    - slurm_manager
   tags: slurm
 
-- name: Start slurm workers
+- name: Start Slurm workers
   hosts: compute
   gather_facts: false
   roles:
-    - start_slurm_workers
+    - slurm_workers
   tags: slurm
 
 - name: Start Slurm services
@@ -87,3 +139,10 @@
   roles:
     - slurm_start_services
   tags: slurm
+
+- name: Install slurm exporter
+  hosts: manager
+  gather_facts: false
+  roles:
+    - slurm_exporter
+  tags: slurm
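
Every play above now carries a tags: value, so one layer of the stack can be converged on its own with the standard ansible-playbook flags, e.g. ansible-playbook omnia.yml --tags kubernetes or --skip-tags slurm. A minimal sketch of the same pattern for an add-on play, assuming a hypothetical monitoring role:

- name: Deploy monitoring on the manager (hypothetical example role)
  hosts: manager
  gather_facts: false
  roles:
    - monitoring    # not part of this commit; illustrative only
  tags: monitoring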

+ 1 - 1
roles/common/files/daemon.json

@@ -6,4 +6,4 @@
     }
   },
   "default-runtime": "nvidia"
-}
+}

+ 1 - 1
roles/common/files/inventory.fact

@@ -15,4 +15,4 @@ cat << EOF
 }
 EOF
 
-rm -f $INVENTORY
+rm -f $INVENTORY

+ 17 - 12
roles/common/handlers/main.yml

@@ -1,18 +1,23 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
 ---
 
-- name: Start and Enable docker service
-  service:
-    name: docker
-    state: restarted
-    enabled: yes
-  #tags: install
-
-- name: Start and Enable Kubernetes - kubelet
-  service:
-    name: kubelet
+- name: Restart ntpd
+  systemd:
+    name: ntpd
     state: started
     enabled: yes
-  #tags: install
 
 - name: Restart chrony
   service:
@@ -32,4 +37,4 @@
   register: chrony_src
   until:  chrony_src.stdout.find('^*') > -1
   retries: "{{ retry_count }}"
-  delay: "{{ delay_count }}"
+  delay: "{{ delay_count }}"
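
The hunk above shows only the tail of the clock-sync handler (register/until/retries). A plausible shape for the whole task, assuming it polls chronyc sources until a selected source (the line chronyc marks with ^*) appears:

- name: Sync chrony sources
  command: chronyc sources          # assumed command; only the retry tail is visible in the diff
  changed_when: false
  register: chrony_src
  until: chrony_src.stdout.find('^*') > -1
  retries: "{{ retry_count }}"
  delay: "{{ delay_count }}"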

+ 5 - 59
roles/common/tasks/main.yml

@@ -15,23 +15,17 @@
 
 - name: Create a custom fact directory on each host
   file:
-    path: /etc/ansible/facts.d
+    path: "{{ custom_fact_dir }}"
     state: directory
+    mode: "{{ custom_fact_dir_mode }}"
 
 - name: Install accelerator discovery script
   copy:
     src: inventory.fact
-    dest: /etc/ansible/facts.d/inventory.fact
-    mode: 0755
-
-- name: Add kubernetes repo
-  copy:
-    src: kubernetes.repo
-    dest: "{{ k8s_repo_dest }}"
+    dest: "{{ accelerator_discovery_script_dest }}"
     owner: root
     group: root
-    mode: "{{ k8s_repo_file_mode }}"
-  tags: install
+    mode: "{{ accelerator_discovery_script_mode }}"
 
 - name: Add elrepo GPG key
   rpm_key:
@@ -45,26 +39,6 @@
     state: present
   tags: install
 
-- name: Add docker community edition repository
-  get_url:
-    url: "{{ docker_repo_url }}"
-    dest: "{{ docker_repo_dest }}"
-  tags: install
-
-- name: Update sysctl to handle incorrectly routed traffic when iptables is bypassed
-  copy:
-    src: k8s.conf
-    dest: "{{ k8s_conf_dest }}"
-    owner: root
-    group: root
-    mode: "{{ k8s_conf_file_mode }}"
-  tags: install
-
-- name: Update sysctl
-  command: /sbin/sysctl --system
-  changed_when: true
-  tags: install
-
 - name: Disable swap
   command: /sbin/swapoff -a
   changed_when: true
@@ -84,44 +58,16 @@
 - name: Collect host facts (including accelerator information)
   setup: ~
 
-- name: Install k8s packages
-  package:
-    name: "{{ k8s_packages }}"
-    state: present
-  tags: install
-
-- name: Versionlock kubernetes
-  command: "yum versionlock '{{ item }}'"
-  args:
-    warn: false
-  with_items:
-    - "{{ k8s_packages }}"
-  changed_when: true
-  tags: install
-
 - name: Install infiniBand support
   package:
     name: "@Infiniband Support"
     state: present
   tags: install
 
-- name: Start and enable docker service
-  service:
-    name: docker
-    state: restarted
-    enabled: yes
-  tags: install
-
-- name: Start and enable kubernetes - kubelet
-  service:
-    name: kubelet
-    state: restarted
-    enabled: yes
-
 - name: Deploy time ntp/chrony
   include_tasks: ntp.yml
   tags: install
 
 - name: Install Nvidia drivers and software components
   include_tasks: nvidia.yml
-  when: ansible_local.inventory.nvidia_gpu > 0
+  when: ansible_local.inventory.nvidia_gpu > 0
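
The inventory.fact script installed above becomes a custom fact: once setup re-reads /etc/ansible/facts.d, its JSON output is exposed under ansible_local.inventory, which is exactly what the nvidia.yml gate on the last line consumes. A small sketch of reading it back, assuming the fact file emits an nvidia_gpu count:

- hosts: all
  tasks:
    - name: Re-read facts.d so ansible_local is populated
      setup:
        filter: ansible_local

    - name: Report the accelerator count discovered on this host
      debug:
        msg: "nvidia_gpu = {{ ansible_local.inventory.nvidia_gpu | default(0) }}"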

+ 25 - 25
roles/common/tasks/ntp.yml

@@ -13,28 +13,28 @@
 #  limitations under the License.
 ---
 
-#- name: Deploy ntp servers
-#block:
-#- name: Deploy ntpd
-#package:
-#name: ntp
-#state: present
-#- name: Deploy ntpdate
-#package:
-#name: ntpdate
-#state: present
-#- name: Update ntp servers
-#template:
-#src: ntp.conf.j2
-#dest: "{{ ntp_path }}"
-#owner: root
-#group: root
-#mode: "{{ ntp_mode }}"
-          #backup: yes
-          #notify:
-          #- restart ntpd
-            #- sync ntp clocks
-            #when:  ( ansible_distribution == "CentOS" or   ansible_distribution == "RedHat" ) and ansible_distribution_major_version  < os_higher_version
+  - name: Deploy ntp servers
+    block:
+      - name: Deploy ntpd
+        package:
+          name: ntp
+          state: present
+      - name: Deploy ntpdate
+        package:
+          name: ntpdate
+          state: present
+      - name: Update ntp servers
+        template:
+          src: ntp.conf.j2
+          dest: "{{ ntp_path }}"
+          owner: root
+          group: root
+          mode: "{{ ntp_mode }}"
+          backup: yes
+        notify:
+          - Restart ntpd
+          - Sync ntp clocks
+    when:  ( ansible_distribution == "CentOS" or   ansible_distribution == "RedHat" ) and ansible_distribution_major_version  < os_higher_version
 
   - name: Deploy chrony server
     block:
@@ -51,6 +51,6 @@
           mode: "{{ ntp_mode }}"
           backup: yes
         notify:
-          - restart chrony
-          - sync chrony sources
-    when:  ( ansible_distribution == "CentOS" or   ansible_distribution == "RedHat" ) and ansible_distribution_major_version  > os_version
+          - Restart chrony
+          - Sync chrony sources
+    when:  ( ansible_distribution == "CentOS" or   ansible_distribution == "RedHat" ) and ansible_distribution_major_version  > os_version
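
The two blocks select a time service by OS major version: ntpd on releases below os_higher_version, chrony above os_version. A quick way to see which branch a host would take, assuming the usual EL7/EL8 split behind those variables:

- hosts: manager, compute
  gather_facts: true
  tasks:
    - name: Show which time service this host would receive
      debug:
        msg: "{{ 'ntpd' if ansible_distribution_major_version | int < 8 else 'chrony' }}"
      when: ansible_distribution in ['CentOS', 'RedHat']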

+ 1 - 1
roles/common/tasks/nvidia.yml

@@ -58,4 +58,4 @@
     name: kubelet
     state: restarted
     enabled: yes
-  tags: install
+  tags: install

+ 1 - 2
roles/common/templates/chrony.conf.j2

@@ -38,5 +38,4 @@ leapsectz right/UTC
 logdir /var/log/chrony
 
 # Select which information is logged.
-#log measurements statistics tracking
-
+#log measurements statistics tracking

+ 1 - 3
roles/common/templates/ntp.conf.j2

@@ -11,6 +11,4 @@ server  {{ item }} iburst
 
 includefile /etc/ntp/crypto/pw
 
-keys /etc/ntp/keys
-
-
+keys /etc/ntp/keys

+ 7 - 17
roles/common/vars/main.yml

@@ -19,32 +19,22 @@ common_packages:
   - gcc
   - nfs-utils
   - python3-pip
-  - docker-ce
   - bash-completion
   - nvidia-detect
   - chrony
   - pciutils
 
-k8s_packages:
-  - kubelet-1.16.7
-  - kubeadm-1.16.7
-  - kubectl-1.16.7
+custom_fact_dir: /etc/ansible/facts.d
 
-k8s_repo_dest: /etc/yum.repos.d/
+custom_fact_dir_mode: 0755
 
-elrepo_gpg_key_url: https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
-
-elrepo_rpm_url: https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
-
-docker_repo_url: https://download.docker.com/linux/centos/docker-ce.repo
+accelerator_discovery_script_dest: /etc/ansible/facts.d/inventory.fact
 
-docker_repo_dest: /etc/yum.repos.d/docker-ce.repo
+accelerator_discovery_script_mode: 0755
 
-k8s_conf_dest: /etc/sysctl.d/
-
-k8s_repo_file_mode: 0644
+elrepo_gpg_key_url: https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
 
-k8s_conf_file_mode: 0644
+elrepo_rpm_url: https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
 
 chrony_path: "/etc/chrony.conf"
 ntp_path: "/etc/ntp.conf"
@@ -73,4 +63,4 @@ nvidia_packages:
   - nvidia-docker2
 
 daemon_file_dest: /etc/docker/
-daemon_file_mode: 0644
+daemon_file_mode: 0644

roles/common/files/k8s.conf → roles/k8s_common/files/k8s.conf


roles/common/files/kubernetes.repo → roles/k8s_common/files/kubernetes.repo


+ 28 - 0
roles/k8s_common/handlers/main.yml

@@ -0,0 +1,28 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Start and Enable docker service
+  service:
+    name: docker
+    state: restarted
+    enabled: yes
+  tags: install
+
+- name: Start and Enable Kubernetes - kubelet
+  service:
+    name: kubelet
+    state: started
+    enabled: yes
+  tags: install

+ 77 - 0
roles/k8s_common/tasks/main.yml

@@ -0,0 +1,77 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+- name: Add kubernetes repo
+  copy:
+    src: kubernetes.repo
+    dest: "{{ k8s_repo_dest }}"
+    owner: root
+    group: root
+    mode: "{{ k8s_repo_file_mode }}"
+  tags: install
+
+- name: Add docker community edition repository
+  get_url:
+    url: "{{ docker_repo_url }}"
+    dest: "{{ docker_repo_dest }}"
+  tags: install
+
+- name: Update sysctl to handle incorrectly routed traffic when iptables is bypassed
+  copy:
+    src: k8s.conf
+    dest: "{{ k8s_conf_dest }}"
+    owner: root
+    group: root
+    mode: "{{ k8s_conf_file_mode }}"
+  tags: install
+
+- name: Update sysctl
+  command: /sbin/sysctl --system
+  changed_when: true
+  tags: install
+
+- name: Install docker
+  package:
+    name: docker-ce
+    state: present
+  tags: install
+
+- name: Install k8s packages
+  package:
+    name: "{{ k8s_packages }}"
+    state: present
+  tags: install
+
+- name: Versionlock kubernetes
+  command: "yum versionlock '{{ item }}'"
+  args:
+    warn: false
+  with_items:
+    - "{{ k8s_packages }}"
+  changed_when: true
+  tags: install
+
+- name: Start and enable docker service
+  service:
+    name: docker
+    state: restarted
+    enabled: yes
+  tags: install
+
+- name: Start and enable kubernetes - kubelet
+  service:
+    name: kubelet
+    state: restarted
+    enabled: yes
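
The versionlock task pins kubelet/kubeadm/kubectl at 1.16.7 so a routine yum update cannot drift the cluster version. Note that yum versionlock requires the versionlock plugin; a verification sketch, assuming the plugin package name used on EL7:

- hosts: manager, compute
  tasks:
    - name: Ensure the versionlock plugin is present (assumed prerequisite)
      package:
        name: yum-plugin-versionlock
        state: present

    - name: List the current locks
      command: yum versionlock list
      args:
        warn: false
      register: lock_list
      changed_when: false

    - name: Confirm the K8s packages are pinned
      assert:
        that: "'kubelet' in lock_list.stdout"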

+ 31 - 0
roles/k8s_common/vars/main.yml

@@ -0,0 +1,31 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+k8s_packages:
+  - kubelet-1.16.7
+  - kubeadm-1.16.7
+  - kubectl-1.16.7
+
+k8s_repo_dest: /etc/yum.repos.d/
+
+docker_repo_url: https://download.docker.com/linux/centos/docker-ce.repo
+
+docker_repo_dest: /etc/yum.repos.d/docker-ce.repo
+
+k8s_conf_dest: /etc/sysctl.d/
+
+k8s_repo_file_mode: 0644
+
+k8s_conf_file_mode: 0644

+ 2 - 2
roles/firewalld/tasks/main.yml

@@ -40,7 +40,7 @@
     port: "{{ item }}/tcp"
     permanent: yes
     state: enabled
-  with_items: '{{ k8s_worker_ports }}'
+  with_items: '{{ k8s_compute_ports }}'
   when: "'compute' in group_names"
   tags: firewalld
 
@@ -81,4 +81,4 @@
     name: firewalld
     state: stopped
     enabled: no
-  tags: firewalld
+  tags: firewalld

+ 1 - 2
roles/firewalld/vars/main.yml

@@ -25,7 +25,7 @@ k8s_master_ports:
   - 10252
 
 # Worker nodes firewall ports
-k8s_worker_ports:
+k8s_compute_ports:
   - 10250
   - 30000-32767
 
@@ -35,7 +35,6 @@ calico_udp_ports:
 calico_tcp_ports:
   - 5473
   - 179
-  - 5473
 
 # Flannel CNI firewall ports
 flannel_udp_ports:
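
The rename from k8s_worker_ports to k8s_compute_ports keeps the variable aligned with the compute inventory group, and the duplicate 5473 entry is dropped from calico_tcp_ports. The open ports can be checked per group in the same style as the new test playbooks; a sketch:

- hosts: manager, compute
  tasks:
    - name: List the ports firewalld currently exposes
      command: firewall-cmd --list-ports
      register: open_ports
      changed_when: false

    - name: Compute nodes should expose the k8s_compute_ports
      assert:
        that:
          - "'10250' in open_ports.stdout"
          - "'30000-32767' in open_ports.stdout"
      when: "'compute' in group_names"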

roles/manager/tasks/main.yml → roles/k8s_manager/tasks/main.yml


roles/manager/vars/main.yml → roles/k8s_manager/vars/main.yml


+ 40 - 0
roles/k8s_nfs_client_setup/tasks/main.yml

@@ -0,0 +1,40 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Install nfs-utils
+  package:
+    name: nfs-utils
+    state: present
+  tags: nfs_client
+
+- name: Creating directory to mount NFS Share
+  file:
+    path: "{{ nfs_mnt_dir }}"
+    state: directory
+    mode: "{{ nfs_mnt_dir_mode }}"
+  tags: nfs_client
+
+- name: Mounting NFS Share
+  command: "mount {{ groups['manager'][0] }}:{{ nfs_mnt_dir }} {{ nfs_mnt_dir }}"
+  changed_when: true
+  args:
+    warn: false
+  tags: nfs_client
+
+- name: Configuring Automount NFS Shares on reboot
+  lineinfile:
+    path: "{{ fstab_file_path }}"
+    line: "{{ groups['manager'][0] }}:{{ nfs_mnt_dir }}     {{ nfs_mnt_dir }}  nfs     nosuid,rw,sync,hard,intr 0 0"
+  tags: nfs_client
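
The role shells out to mount and appends to /etc/fstab by hand (note that groups['manager'] is a list, hence the [0] index above). An equivalent, more idempotent sketch with the builtin mount module, where state: mounted both mounts the share now and persists it to fstab, under the same assumption that the first manager host serves the export:

- name: Mount the manager's NFS export and persist it
  mount:
    path: "{{ nfs_mnt_dir }}"
    src: "{{ groups['manager'][0] }}:{{ nfs_mnt_dir }}"
    fstype: nfs
    opts: nosuid,rw,sync,hard,intr
    state: mounted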

+ 20 - 0
roles/k8s_nfs_client_setup/vars/main.yml

@@ -0,0 +1,20 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+nfs_mnt_dir: /home/k8snfs
+
+nfs_mnt_dir_mode: 0755
+
+fstab_file_path: /etc/fstab

+ 84 - 0
roles/k8s_nfs_server_setup/tasks/main.yml

@@ -0,0 +1,84 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+- name: Install nfs-utils
+  package:
+    name: nfs-utils
+    state: present
+  tags: nfs_server
+
+- name: Install firewalld
+  package:
+    name: firewalld
+    state: present
+  tags: firewalld
+
+- name: Start and enable firewalld
+  service:
+    name: firewalld
+    state: started
+    enabled: yes
+  tags: firewalld
+
+- name: Start and enable rpcbind and nfs-server service
+  service:
+    name: "{{ item }}"
+    state: restarted
+    enabled: yes
+  with_items:
+    - rpcbind
+    - nfs-server
+  tags: nfs_server
+
+- name: Creating NFS share directory
+  file:
+    path: "{{ nfs_share_dir }}"
+    state: directory
+    mode: "{{ nfs_share_dir_mode }}"
+  tags: nfs_server
+
+- name: Adding NFS share entries in /etc/exports
+  lineinfile:
+    path: "{{ exports_file_path }}"
+    line: "{{ nfs_share_dir }} {{ item }}(rw,sync,no_root_squash)"
+  with_items:
+    - "{{ groups['compute'] }}"
+  tags: nfs_server
+
+- name: Exporting the shared directories
+  command: exportfs -r
+  changed_when: true
+  tags: nfs_server
+
+- name: Configuring firewall
+  firewalld:
+    service: "{{ item }}"
+    permanent: true
+    state: enabled
+  with_items:
+    - "{{ nfs_services }}"
+  tags: nfs_server
+
+- name: Reload firewalld
+  command: firewall-cmd --reload
+  changed_when: true
+  tags: nfs_server
+
+- name: Stop and disable firewalld
+  service:
+    name: firewalld
+    state: stopped
+    enabled: no
+  tags: firewalld
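
Each compute host gets its own line in /etc/exports, and exportfs -r then publishes the table. A verification sketch in the same register as the role:

- hosts: manager
  tasks:
    - name: Show the active export table
      command: exportfs -v
      register: export_table
      changed_when: false

    - name: Every compute node should appear against the share
      assert:
        that: "item in export_table.stdout"
      with_items: "{{ groups['compute'] }}"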

+ 25 - 0
roles/k8s_nfs_server_setup/vars/main.yml

@@ -0,0 +1,25 @@
+# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+nfs_share_dir: /home/k8snfs
+
+nfs_share_dir_mode: 0777
+
+exports_file_path: /etc/exports
+
+nfs_services:
+  - mountd
+  - rpc-bind
+  - nfs

roles/startmanager/files/create_admin_user.yaml → roles/k8s_start_manager/files/create_admin_user.yaml


roles/startmanager/files/create_clusterRoleBinding.yaml → roles/k8s_start_manager/files/create_clusterRoleBinding.yaml


+ 0 - 0
roles/startmanager/files/data-pv.yaml


+ 0 - 0
roles/startmanager/files/data2-pv.yaml


+ 0 - 0
roles/startmanager/files/data3-pv.yaml


+ 0 - 0
roles/startmanager/files/data4-pv.yaml


+ 0 - 0
roles/startmanager/files/flannel_net.sh


+ 0 - 0
roles/startmanager/files/katib-pv.yaml


roles/startmanager/files/kube-flannel.yaml → roles/k8s_start_manager/files/kube-flannel.yaml


roles/startmanager/files/kubeflow_persistent_volumes.yaml → roles/k8s_start_manager/files/kubeflow_persistent_volumes.yaml


+ 0 - 0
roles/startmanager/files/minio-pvc.yaml


+ 0 - 0
roles/startmanager/files/mysql-pv.yaml


roles/startmanager/files/nfs-class.yaml → roles/k8s_start_manager/files/nfs-class.yaml


roles/startmanager/files/nfs-deployment.yaml → roles/k8s_start_manager/files/nfs-deployment.yaml


roles/startmanager/files/nfs-serviceaccount.yaml → roles/k8s_start_manager/files/nfs-serviceaccount.yaml


roles/startmanager/files/nfs_clusterrole.yaml → roles/k8s_start_manager/files/nfs_clusterrole.yaml


roles/startmanager/files/nfs_clusterrolebinding.yaml → roles/k8s_start_manager/files/nfs_clusterrolebinding.yaml


+ 0 - 0
roles/startmanager/files/notebook-pv.yaml


+ 0 - 0
roles/startmanager/files/persistent_volumes.yaml


roles/startmanager/files/pvc.yaml → roles/k8s_start_manager/files/pvc.yaml


+ 0 - 0
roles/startmanager/files/tiller_config.sh


+ 2 - 2
roles/startmanager/tasks/main.yml

@@ -146,7 +146,7 @@
   changed_when: true
   tags: init
 
-- name: Edge / Workstation Install allows pods to scheudle on manager
+- name: Edge / Workstation Install allows pods to schedule on manager
   command: kubectl taint nodes --all node-role.kubernetes.io/master-
   when: single_node
-  tags: init
+  tags: init

+ 1 - 1
roles/startmanager/vars/main.yml

@@ -47,4 +47,4 @@ k8s_clusterRoleBinding_file_mode: 0655
 
 calico_yml_url: https://docs.projectcalico.org/manifests/calico.yaml
 
-flannel_yml_url: https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+flannel_yml_url: https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

roles/startservices/files/metal-config.yaml → roles/k8s_start_services/files/metal-config.yaml


roles/startservices/files/metallb.yaml → roles/k8s_start_services/files/metallb.yaml


roles/startservices/tasks/main.yml → roles/k8s_start_services/tasks/main.yml


+ 2 - 3
roles/startservices/vars/main.yml

@@ -27,8 +27,7 @@ k8s_dashboard_yaml_url: https://raw.githubusercontent.com/kubernetes/dashboard/v
 
 helm_stable_repo_url: https://charts.helm.sh/stable
 
-#nfs_server: "{{ ansible_host }}"
-nfs_server: 10.0.0.1
+nfs_server: "{{ ansible_host }}"
 
 nfs_path: /home/k8snfs
 
@@ -44,4 +43,4 @@ mig_strategy: none
 
 gpu_feature_discovery_version: 0.2.0
 
-fpga_device_plugin_yaml_url: https://raw.githubusercontent.com/Xilinx/FPGA_as_a_Service/master/k8s-fpga-device-plugin/fpga-device-plugin.yml
+fpga_device_plugin_yaml_url: https://raw.githubusercontent.com/Xilinx/FPGA_as_a_Service/master/k8s-fpga-device-plugin/fpga-device-plugin.yml

roles/startworkers/tasks/main.yml → roles/k8s_start_workers/tasks/main.yml


roles/startworkers/vars/main.yml → roles/k8s_start_workers/vars/main.yml


+ 0 - 3
roles/manager/files/k8s.conf

@@ -1,3 +0,0 @@
-net.bridge.bridge-nf-call-ip6tables = 1
-net.bridge.bridge-nf-call-iptables = 1
-

+ 0 - 8
roles/manager/files/kubernetes.repo

@@ -1,8 +0,0 @@
-[kubernetes]
-name=Kubernetes
-baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
-enabled=1
-gpgcheck=1
-repo_gpgcheck=1
-gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
-

+ 113 - 0
test/test_jupyterhub.yml

@@ -0,0 +1,113 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_014
+# Execute jupyterhub role in manager nodes with os installed centos 7.9
+- name: OMNIA_UKP_US_VFKP_TC_014
+  hosts: manager
+  gather_facts: false
+  vars_files:
+    - test_vars/test_jupyterhub_vars.yml
+  tasks:
+    - block:
+        - name: Call jupyterhub role
+          include_role:
+            name: ../platforms/roles/jupyterhub
+      tags: TC_014
+
+    - name: Waiting for the pods deployment
+      pause:
+        minutes: 5
+      tags: TC_014
+
+    - name: Checking all running pods under default namespace
+      command: kubectl get pods --namespace default --field-selector=status.phase=Running
+      register: namespace_default_running_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_014, VERIFY_014
+
+    - name: Checking K8s services
+      command: kubectl get services
+      register: k8s_services
+      changed_when: false
+      ignore_errors: True
+      tags: TC_014, VERIFY_014
+
+    - name: Validating JupyterHub pods
+      assert:
+        that:
+          - "'hub' in namesapce_default_running_pods.stdout"
+          - "'proxy' in namesapce_default_running_pods.stdout"
+        fail_msg: "{{ jupyterhub_pods_fail_msg }}"
+        success_msg: "{{ jupyterhub_pods_success_msg }}"
+      tags: TC_014, VERIFY_014
+
+    - name: Validating JupyterHub services
+      assert:
+        that:
+          - "'hub' in k8s_services.stdout"
+          - "'proxy-public' in k8s_services.stdout"
+          - "'proxy-api' in k8s_services.stdout"
+        fail_msg: "{{ jupyterhub_services_fail_msg }}"
+        success_msg: "{{ jupyterhub_services_success_msg }}"
+      tags: TC_014, VERIFY_014
+
+# OMNIA_UKP_US_VFKP_TC_015
+# Execute jupyterhub role in manager nodes with JupyterHub already deployed
+- name: OMNIA_UKP_US_VFKP_TC_015
+  hosts: manager
+  gather_facts: false
+  vars_files:
+    - test_vars/test_jupyterhub_vars.yml
+  tasks:
+    - block:
+        - name: Call jupyterhub role
+          include_role:
+            name: ../platforms/roles/jupyterhub
+      tags: TC_015, VERIFY_015
+
+    - name: Checking all running pods under default namespace
+      command: kubectl get pods --namespace default --field-selector=status.phase=Running
+      register: namespace_default_running_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_015, VERIFY_015
+
+    - name: Checking K8s services
+      command: kubectl get services
+      register: k8s_services
+      changed_when: false
+      ignore_errors: True
+      tags: TC_015, VERIFY_015
+
+    - name: Validating JupyterHub pods
+      assert:
+        that:
+          - "'hub' in namesapce_default_running_pods.stdout"
+          - "'proxy' in namesapce_default_running_pods.stdout"
+        fail_msg: "{{ jupyterhub_pods_fail_msg }}"
+        success_msg: "{{ jupyterhub_pods_success_msg }}"
+      tags: TC_015, VERIFY_015
+
+    - name: Validating JupyterHub services
+      assert:
+        that:
+          - "'hub' in k8s_services.stdout"
+          - "'proxy-public' in k8s_services.stdout"
+          - "'proxy-api' in k8s_services.stdout"
+        fail_msg: "{{ jupyterhub_services_fail_msg }}"
+        success_msg: "{{ jupyterhub_services_success_msg }}"
+      tags: TC_015, VERIFY_015
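
The fixed five-minute pause in these tests is the simplest wait, but it either overshoots or races the deployment. A sketch of an until-based wait that polls for the hub pod instead, reusing the same kubectl query the test already issues:

- name: Wait for the hub pod to reach Running
  command: kubectl get pods --namespace default --field-selector=status.phase=Running
  register: pods
  until: "'hub' in pods.stdout"
  retries: 30
  delay: 10
  changed_when: false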

+ 155 - 0
test/test_k8s_common.yml

@@ -0,0 +1,155 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# Testcase OMNIA_UKP_US_VFKP_TC_001
+# Execute k8s_common role in manager & compute nodes with os installed centos 7.8
+- name: OMNIA_UKP_US_VFKP_TC_001
+  hosts: manager, compute
+  vars_files:
+    - test_vars/test_k8s_common_vars.yml
+  tasks:
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/k8s_common
+      tags: TC_001
+
+    - name: Checking common packages installation status
+      command: "'{{ item }}' --version"
+      with_items:
+        - "{{ common_packages }}"
+      register: common_packages_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_001, VERIFY_001
+
+    - name: Checking K8s packages installation status
+      command: "'{{ item }}' version"
+      with_items:
+        - "{{ k8_packages }}"
+      register: k8s_packages_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_001, VERIFY_001
+
+    - name: Checking docker service status
+      systemd:
+        name: docker
+      register: docker_service
+      tags: TC_001, VERIFY_001
+
+    - name: Checking K8s service status
+      systemd:
+        name: kubelet
+      register: kubelet_service
+      tags: TC_001, VERIFY_001
+
+    - name: Validate common & K8s packages status
+      assert:
+        that:
+          - "'command not found' not in {{ item }}"
+        fail_msg: "{{ packages_status_fail_msg }}"
+        success_msg: "{{ packages_status_success_msg }}"
+      with_items:
+        - "{{ common_packages_status.results }}"
+        - "{{ k8s_packages_status.results }}"
+      tags: TC_001, VERIFY_001
+
+    - name: Validating docker service status
+      assert:
+        that:
+          - docker_service.status.ActiveState == 'active'
+        fail_msg: "{{ docker_service_fail_msg }}"
+        success_msg: "{{ docker_service_success_msg }}"
+      tags: TC_001, VERIFY_001
+
+    - name: Validating K8s service status
+      assert:
+        that:
+          - kubelet_service.status.ActiveState == 'active'
+        fail_msg: "{{ kubelet_service_fail_msg }}"
+        success_msg: "{{ kubelet_service_success_msg }}"
+      tags: TC_001, VERIFY_001
+
+# Testcase OMNIA_UKP_US_VFKP_TC_002
+# Execute k8s_common role in manager & compute nodes with common and K8s packages already installed
+- name: OMNIA_UKP_US_VFKP_TC_002
+  hosts: manager, compute
+  vars_files:
+    - test_vars/test_k8s_common_vars.yml
+  tasks:
+    - block:
+        - name: Call common role
+          include_role:
+            name: ../roles/k8s_common
+      tags: TC_002, VERIFY_002
+
+    - name: Checking common packages installation status
+      command: "'{{ item }}' --version"
+      with_items:
+        - "{{ common_packages }}"
+      register: common_packages_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_002, VERIFY_002
+
+    - name: Checking K8s packages installation status
+      command: "'{{ item }}' version"
+      with_items:
+        - "{{ k8_packages }}"
+      register: k8s_packages_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_002, VERIFY_002
+
+    - name: Checking docker service status
+      systemd:
+        name: docker
+      register: docker_service
+      tags: TC_002, VERIFY_002
+
+    - name: Checking K8s service status
+      systemd:
+        name: kubelet
+      register: kubelet_service
+      tags: TC_002, VERIFY_002
+
+    - name: Validate common & K8s packages status
+      assert:
+        that:
+          - "'command not found' not in {{ item }}"
+        fail_msg: "{{ packages_status_fail_msg }}"
+        success_msg: "{{ packages_status_success_msg }}"
+        quiet: true
+      with_items:
+        - "{{ common_packages_status.results }}"
+        - "{{ k8s_packages_status.results }}"
+      tags: TC_002, VERIFY_002
+
+    - name: Validating docker service status
+      assert:
+        that:
+          - docker_service.status.ActiveState == 'active'
+        fail_msg: "{{ docker_service_fail_msg }}"
+        success_msg: "{{ docker_service_success_msg }}"
+      tags: TC_002, VERIFY_002
+
+    - name: Validating K8s service status
+      assert:
+        that:
+          - kubelet_service.status.ActiveState == 'active'
+        fail_msg: "{{ kubelet_service_fail_msg }}"
+        success_msg: "{{ kubelet_service_success_msg }}"
+      tags: TC_002, VERIFY_002

+ 226 - 0
test/test_k8s_firewalld.yml

@@ -0,0 +1,226 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_007
+# Execute k8s_firewalld role in manager and compute nodes with os installed centos 7.9
+- name: OMNIA_UKP_US_VFKP_TC_007
+  hosts: manager, compute
+  gather_facts: false
+  vars_files:
+    - test_vars/test_k8s_firewalld_vars.yml
+    - ../roles/k8s_firewalld/vars/main.yml
+  tasks:
+    - block:
+        - name: Call k8s_firewalld role
+          include_role:
+            name: ../roles/k8s_firewalld
+      tags: TC_007
+
+    - name: Start and enable firewalld
+      service:
+        name: firewalld
+        state: started
+        enabled: yes
+      tags: TC_007, VERIFY_007
+
+    - name: Checking firewalld open ports on manager node
+      command: firewall-cmd --list-ports
+      register: manager_firewalld_ports
+      when: "'manager' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Checking firewalld open ports on compute node
+      command: firewall-cmd --list-ports
+      register: compute_firewalld_ports
+      when: "'compute' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Validating K8s port on manager node
+      assert:
+        that:
+          - "'6443' in manager_firewalld_ports.stdout"
+          - "'2379-2380' in manager_firewalld_ports.stdout"
+          - "'10250' in manager_firewalld_ports.stdout"
+          - "'10251' in manager_firewalld_ports.stdout"
+          - "'10252' in manager_firewalld_ports.stdout"
+        fail_msg: "{{ manager_k8s_ports_status_fail_msg }}"
+        success_msg: "{{ manager_k8s_ports_status_success_msg }}"
+      when: "'manager' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Validating K8s port on compute node
+      assert:
+        that:
+          - "'10250' in compute_firewalld_ports.stdout"
+          - "'30000-32767' in compute_firewalld_ports.stdout"
+        fail_msg: "{{ compute_k8s_ports_status_fail_msg }}"
+        success_msg: "{{ compute_k8s_ports_status_success_msg }}"
+      when: "'compute' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Validating Calico udp/tcp ports on manager nodes
+      assert:
+        that:
+          - "'4789' in manager_firewalld_ports.stdout"
+          - "'5473' in manager_firewalld_ports.stdout"
+          - "'179' in manager_firewalld_ports.stdout"
+        fail_msg: "{{ calico_ports_manager_fail_msg }}"
+        success_msg: "{{ calico_ports_manager_success_msg }}"
+      when: "k8s_cni == 'calico' and 'manager' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Validating Calico udp/tcp ports on compute nodes
+      assert:
+        that:
+          - "'4789' in compute_firewalld_ports.stdout"
+          - "'5473' in compute_firewalld_ports.stdout"
+          - "'179' in compute_firewalld_ports.stdout"
+        fail_msg: "{{ calico_ports_compute_fail_msg }}"
+        success_msg: "{{ calico_ports_compute_success_msg }}"
+      when: "k8s_cni == 'calico' and 'compute' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Validating Flannel ports on manager nodes
+      assert:
+        that:
+          - "'8285' in manager_firewalld_ports.stdout"
+          - "'8472' in manager_firewalld_ports.stdout"
+        fail_msg: "{{ flannel_ports_manager_fail_msg }}"
+        success_msg: "{{ flannel_ports_manager_success_msg }}"
+      when: "k8s_cni == 'flannel' and 'manager' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Validating Flannel ports on compute nodes
+      assert:
+        that:
+          - "'8285' in compute_firewalld_ports.stdout"
+          - "'8472' in compute_firewalld_ports.stdout"
+        fail_msg: "{{ flannel_ports_compute_fail_msg }}"
+        success_msg: "{{ flannel_ports_compute_success_msg }}"
+      when: "k8s_cni == 'flannel' and 'compute' in group_names"
+      tags: TC_007, VERIFY_007
+
+    - name: Stop and disable firewalld
+      service:
+        name: firewalld
+        state: stopped
+        enabled: no
+      tags: TC_007, VERIFY_007
+
+# OMNIA_UKP_US_VFKP_TC_008
+# Execute k8s_firewalld role in manager and compute nodes with K8s ports already opened
+- name: OMNIA_UKP_US_VFKP_TC_008
+  hosts: manager, compute
+  gather_facts: false
+  vars_files:
+    - test_vars/test_k8s_firewalld_vars.yml
+    - ../roles/k8s_firewalld/vars/main.yml
+  tasks:
+    - block:
+        - name: Call k8s_firewalld role
+          include_role:
+            name: ../roles/k8s_firewalld
+      tags: TC_008
+
+    - name: Start and enable firewalld
+      service:
+        name: firewalld
+        state: started
+        enabled: yes
+      tags: TC_008, VERIFY_008
+
+    - name: Checking firewalld open ports on manager node
+      command: firewall-cmd --list-ports
+      register: manager_firewalld_ports
+      when: "'manager' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Checking firewalld open ports on compute node
+      command: firewall-cmd --list-ports
+      register: compute_firewalld_ports
+      when: "'compute' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Validating K8s port on manager node
+      assert:
+        that:
+          - "'6443' in manager_firewalld_ports.stdout"
+          - "'2379-2380' in manager_firewalld_ports.stdout"
+          - "'10250' in manager_firewalld_ports.stdout"
+          - "'10251' in manager_firewalld_ports.stdout"
+          - "'10252' in manager_firewalld_ports.stdout"
+        fail_msg: "{{ manager_k8s_ports_status_fail_msg }}"
+        success_msg: "{{ manager_k8s_ports_status_success_msg }}"
+      when: "'manager' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Validating K8s port on compute node
+      assert:
+        that:
+          - "'10250' in compute_firewalld_ports.stdout"
+          - "'30000-32767' in compute_firewalld_ports.stdout"
+        fail_msg: "{{ compute_k8s_ports_status_fail_msg }}"
+        success_msg: "{{ compute_k8s_ports_status_success_msg }}"
+      when: "'compute' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Validating Calico udp/tcp ports on manager nodes
+      assert:
+        that:
+          - "'4789' in manager_firewalld_ports.stdout"
+          - "'5473' in manager_firewalld_ports.stdout"
+          - "'179' in manager_firewalld_ports.stdout"
+        fail_msg: "{{ calico_ports_manager_fail_msg }}"
+        success_msg: "{{ calico_ports_manager_success_msg }}"
+      when: "k8s_cni == 'calico' and 'manager' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Validating Calico udp/tcp ports on compute nodes
+      assert:
+        that:
+          - "'4789' in compute_firewalld_ports.stdout"
+          - "'5473' in compute_firewalld_ports.stdout"
+          - "'179' in compute_firewalld_ports.stdout"
+        fail_msg: "{{ calico_ports_compute_fail_msg }}"
+        success_msg: "{{ calico_ports_compute_success_msg }}"
+      when: "k8s_cni == 'calico' and 'compute' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Validating Flannel ports on manager nodes
+      assert:
+        that:
+          - "'8285' in manager_firewalld_ports.stdout"
+          - "'8472' in manager_firewalld_ports.stdout"
+        fail_msg: "{{ flannel_ports_manager_fail_msg }}"
+        success_msg: "{{ flannel_ports_manager_success_msg }}"
+      when: "k8s_cni == 'flannel' and 'manager' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Validating Flannel ports on compute nodes
+      assert:
+        that:
+          - "'8285' in compute_firewalld_ports.stdout"
+          - "'8472' in compute_firewalld_ports.stdout"
+        fail_msg: "{{ flannel_ports_compute_fail_msg }}"
+        success_msg: "{{ flannel_ports_compute_success_msg }}"
+      when: "k8s_cni == 'flannel' and 'compute' in group_names"
+      tags: TC_008, VERIFY_008
+
+    - name: Stop and disable firewalld
+      service:
+        name: firewalld
+        state: stopped
+        enabled: no
+      tags: TC_008, VERIFY_008

+ 70 - 0
test/test_k8s_manager.yml

@@ -0,0 +1,70 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_005
+# Execute k8s_manager role in manager nodes with os installed centos 7.9
+- name: OMNIA_UKP_US_VFKP_TC_005
+  hosts: manager
+  vars_files:
+    - test_vars/test_k8s_manager_vars.yml
+  tasks:
+    - block:
+        - name: Call manager role
+          include_role:
+            name: ../roles/k8s_manager
+      tags: TC_005
+
+    - name: Checking helm installation status
+      command: helm version
+      register: helm_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_005, VERIFY_005
+
+    - name: Validating helm installation status
+      assert:
+        that:
+          - "'version.BuildInfo' in helm_status.stdout"
+        fail_msg: "{{ helm_status_fail_msg }}"
+        success_msg: "{{ helm_status_success_msg }}"
+      tags: TC_005, VERIFY_005
+
+# OMNIA_UKP_US_VFKP_TC_006
+# Execute k8s_manager role in manager nodes with helm already installed
+- name: OMNIA_UKP_US_VFKP_TC_006
+  hosts: manager
+  vars_files:
+    - test_vars/test_k8s_manager_vars.yml
+  tasks:
+    - block:
+        - name: Call manager role
+          include_role:
+            name: ../roles/k8s_manager
+      tags: TC_006, VERIFY_006
+
+    - name: Checking helm installation status
+      command: helm version
+      register: helm_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_006, VERIFY_006
+
+    - name: Validating helm installation status
+      assert:
+        that:
+          - "'command not found' not in helm_status.stdout"
+        fail_msg: "{{ helm_status_fail_msg }}"
+        success_msg: "{{ helm_status_success_msg }}"
+      tags: TC_006, VERIFY_006

+ 144 - 0
test/test_k8s_start_manager_workers.yml

@@ -0,0 +1,144 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_009, OMNIA_UKP_US_VFKP_TC_010
+# Execute k8s_start_manager role in manager nodes with os installed centos 7.9 and swap enabled
+- name: OMNIA_UKP_US_VFKP_TC_009, OMNIA_UKP_US_VFKP_TC_010
+  hosts: manager
+  gather_facts: false
+  vars_files:
+    - test_vars/test_k8s_start_manager_workers_vars.yml
+  tasks:
+    - name: Enable Swap
+      command: /usr/sbin/swapon -a
+      changed_when: true
+      tags: TC_009, TC_010
+
+    - block:
+        - name: Call k8s_start_manager role
+          include_role:
+            name: ../roles/k8s_start_manager
+      tags: TC_009, TC_010
+
+    - name: Waiting for the pods deployment
+      pause:
+        minutes: 10
+      tags: TC_009, TC_010
+
+    - name: Checking master node
+      command: kubectl get nodes
+      register: master_node_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Checking kube-system pods
+      command: kubectl get pods --namespace kube-system --field-selector=status.phase=Running
+      register: kube_system_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Checking calico/flannel SDN network status
+      command: ip address
+      register: calico_flannel_status
+      changed_when: false
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Checking K8s service account and token
+      command: kubectl get secrets
+      register: service_account_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Validating master node status
+      assert:
+        that:
+          - "'master' in master_node_status.stdout"
+        fail_msg: "{{ master_node_status_fail_msg }}"
+        success_msg: "{{ master_node_status_success_msg }}"
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Validating controller-manager and scheduler and coreDNS pods status
+      assert:
+        that:
+          - "'kube-scheduler' in kube_system_pods.stdout"
+          - "'kube-controller' in kube_system_pods.stdout"
+        fail_msg: "{{ controller_scheduler_status_fail_msg }}"
+        success_msg: "{{ controller_scheduler_status_success_msg }}"
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Validating coreDNS pods status
+      assert:
+        that:
+          - "'coredns' in kube_system_pods.stdout"
+        fail_msg: "{{ coredns_status_fail_msg }}"
+        success_msg: "{{ coredns_status_success_msg }}"
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Validating calico/flannel SDN network status
+      assert:
+        that:
+          - "'calico' in kube_system_pods.stdout or 'flannel' in kube_system_pods.stdout"
+        fail_msg: "{{ calico_flannel_status_fail_msg }}"
+        success_msg: "{{ calico_flannel_status_success_msg }}"
+      tags: TC_009, TC_010, VERIFY_009
+
+    - name: Validating K8s service account and token status
+      assert:
+        that:
+          - "'kubernetes.io/service-account-token' in service_account_status.stdout"
+        fail_msg: "{{ k8s_service_account_status_fail_msg }}"
+        success_msg: "{{ k8s_service_account_status_success_msg }}"
+      tags: TC_009, TC_010, VERIFY_009
+
+# OMNIA_UKP_US_VFKP_TC_011, OMNIA_UKP_US_VFKP_TC_012
+# Execute k8s_start_workers role in compute nodes with os installed centos 7.9 and swap enabled
+- name: OMNIA_UKP_US_VFKP_TC_011, OMNIA_UKP_US_VFKP_TC_012
+  hosts: compute
+  gather_facts: false
+  tasks:
+    - name: Enable Swap
+      command: /usr/sbin/swapon -a
+      changed_when: true
+      tags: TC_011, TC_012
+
+    - block:
+        - name: Call k8s_start_workers role
+          include_role:
+            name: ../roles/k8s_start_workers
+      tags: TC_011, TC_012
+
+- name: OMNIA_UKP_US_VFKP_TC_011, OMNIA_UKP_US_VFKP_TC_012
+  hosts: manager
+  gather_facts: false
+  vars_files:
+    - test_vars/test_k8s_start_manager_workers_vars.yml
+  tasks:
+    - name: Check worker nodes status
+      command: kubectl get node --selector='!node-role.kubernetes.io/master'
+      register: worker_nodes_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_011, TC_012, VERIFY_011
+
+    - name: Validating worker nodes status
+      assert:
+        that:
+          - "'Ready' in worker_nodes_status.stdout"
+        fail_msg: "{{ worker_nodes_status_fail_msg }}"
+        success_msg: "{{ worker_nodes_status_success_msg }}"
+      tags: TC_011, TC_012, VERIFY_011

+ 97 - 0
test/test_k8s_start_services.yml

@@ -0,0 +1,97 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_013
+# Execute k8s_start_services role in manager nodes with os installed centos 7.9
+- name: OMNIA_UKP_US_VFKP_TC_013
+  hosts: manager
+  gather_facts: false
+  vars_files:
+    - test_vars/test_k8s_start_services_vars.yml
+  tasks:
+    - block:
+        - name: Call k8s_start_services role
+          include_role:
+            name: ../roles/k8s_start_services
+      tags: TC_013
+
+    - name: Waiting for the pods deployment
+      pause:
+        minutes: 10
+      tags: TC_013
+
+    - name: Checking all running pods
+      command: kubectl get pods --all-namespaces --field-selector=status.phase=Running
+      register: running_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_013, VERIFY_013
+
+    - name: Checking default storage class
+      command: kubectl get sc
+      register: default_storage_class
+      changed_when: false
+      ignore_errors: True
+      tags: TC_013, VERIFY_013
+
+    - name: Validating Metallb, Prometheus and MPI pods
+      assert:
+        that:
+          - "'metallb' in running_pods.stdout"
+          - "'prometheus' in running_pods.stdout"
+          - "'mpi-operator' in running_pods.stdout"
+        fail_msg: "{{ metallb_prometheus_mpi_pods_fail_msg }}"
+        success_msg: "{{ metallb_prometheus_mpi_pods_success_msg }}"
+      tags: TC_013, VERIFY_013
+
+    - name: Validating K8s dashboard
+      assert:
+        that:
+          - "'kubernetes-dashboard' in running_pods.stdout"
+        fail_msg: "{{ kubernetes_dashboard_fail_msg }}"
+        success_msg: "{{ kubernetes_dashboard_success_msg }}"
+      tags: TC_013, VERIFY_013
+
+    - name: Validating NFS Client Provisioner pods
+      assert:
+        that:
+          - "'nfs-client-provisioner' in running_pods.stdout"
+        fail_msg: "{{ nfs_client_provisioner_pods_fail_msg }}"
+        success_msg: "{{ nfs_client_provisioner_pods_success_msg }}"
+      tags: TC_013, VERIFY_013
+
+    - name: Validating default storage class
+      assert:
+        that:
+          - "'nfs-client' in default_storage_class.stdout"
+        fail_msg: "{{ default_storage_class_fail_msg }}"
+        success_msg: "{{ default_storage_class_success_msg }}"
+      tags: TC_013, VERIFY_013
+
+    - name: Validating Node Feature Discovery pods
+      assert:
+        that:
+          - "'node-feature-discovery' in running_pods.stdout"
+        fail_msg: "{{ node_feature_discovery_pods_fail_msg }}"
+        success_msg: "{{ node_feature_discovery_pods_success_msg }}"
+      tags: TC_013, VERIFY_013
+
+    - name: Validating Nvidia device plugin pods
+      assert:
+        that:
+          - "'nvidia-device-plugin' in running_pods.stdout"
+        fail_msg: "{{ nvidia_device_plugin_pods_fail_msg }}"
+        success_msg: "{{ nvidia_device_plugin_pods_success_msg }}"
+      tags: TC_013, VERIFY_013
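
The fixed 10-minute pause above is conservative. A hedged alternative (a sketch; pods in ContainerCreating still report phase Pending, so this also covers image pulls) polls until scheduling settles:

    - name: Poll until no pod remains Pending (sketch)
      command: kubectl get pods --all-namespaces --field-selector=status.phase=Pending
      register: pending_pods
      # kubectl prints matching pods to stdout; stdout is empty once none are Pending
      until: pending_pods.stdout | length == 0
      retries: 60
      delay: 10
      changed_when: false
      ignore_errors: true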

+ 50 - 0
test/test_k8s_start_workers.yml

@@ -0,0 +1,50 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_013, OMNIA_UKP_US_VFKP_TC_014
+# Execute the k8s_start_workers role on compute nodes running CentOS 7.9 with swap enabled
+- name: OMNIA_UKP_US_VFKP_TC_013, OMNIA_UKP_US_VFKP_TC_014
+  hosts: compute
+  vars_files:
+    - test_vars/test_k8s_start_workers_vars.yml
+  tasks:
+    - name: Enable Swap
+      command: /usr/sbin/swapon -a
+      changed_when: true
+      tags: TC_013, TC_014
+
+    - block:
+        - name: Call k8s_start_workers role
+          include_role:
+            name: ../roles/k8s_start_workers
+      tags: TC_013, TC_014
+
+- name: OMNIA_UKP_US_VFKP_TC_013, OMNIA_UKP_US_VFKP_TC_014
+  hosts: manager
+  vars_files:
+    - test_vars/test_k8s_start_workers_vars.yml
+  tasks:
+    - name: Check worker nodes status
+      command: kubectl get node --selector='!node-role.kubernetes.io/master'
+      register: worker_nodes_status
+      changed_when: false
+      ignore_errors: True
+      tags: TC_013, TC_014, VERIFY_013
+
+    - name: Validating worker nodes status
+      assert:
+        that:
+          - "'Ready' in worker_nodes_status.stdout"
+        fail_msg: "{{ worker_nodes_status_fail_msg }}"
+        success_msg: "{{ worker_nodes_status_success_msg }}"
+      tags: TC_013, TC_014, VERIFY_013
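
Kubernetes expects swap to be off on worker nodes, so a companion check (a sketch; whether k8s_start_workers re-disables the swap enabled above is an assumption) could verify the final state on the compute hosts:

    - name: Verify swap is disabled after the role runs (sketch)
      command: /usr/sbin/swapon -s
      register: swap_status
      changed_when: false
      # /proc/swaps keeps at most a header line when no swap is active
      failed_when: swap_status.stdout_lines | length > 1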

+ 123 - 0
test/test_kubeflow.yml

@@ -0,0 +1,123 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+# OMNIA_UKP_US_VFKP_TC_016
+# Execute the kubeflow role on manager nodes running CentOS 7.9
+- name: OMNIA_UKP_US_VFKP_TC_016
+  hosts: manager
+  gather_facts: false
+  vars_files:
+    - test_vars/test_kubeflow_vars.yml
+  tasks:
+    - block:
+        - name: Call kubeflow role
+          include_role:
+            name: ../platforms/roles/kubeflow
+      tags: TC_016
+
+    - name: Waiting for the pods deployment
+      pause:
+        minutes: 5
+      tags: TC_016
+
+    - name: Checking installed Kubeflow version
+      command: kfctl version
+      register: kfctl_version
+      changed_when: false
+      ignore_errors: True
+      tags: TC_016, VERIFY_016
+
+    - name: Checking pods under kubeflow namespace
+      command: kubectl get pods --namespace kubeflow
+      register: kubeflow_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_016, VERIFY_016
+
+    - name: Checking pods under istio-system namespace
+      command: kubectl get pods --namespace istio-system
+      register: istio_system_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_016, VERIFY_016
+
+    - name: Validating Kubeflow Installation
+      assert:
+        that:
+          - "'command not found' not in kfctl_version.stdout"
+        fail_msg: "{{ kubeflow_install_fail_msg }}"
+        success_msg: "{{ kubeflow_install_success_msg }}"
+      tags: TC_016, VERIFY_016
+
+    - name: Validating Kubeflow pods deployment
+      assert:
+        that:
+          - "'Running' in kubeflow_pods.stdout or 'ContainerCreating' in kubeflow_pods.stdout"
+          - "'Running' in istio_system_pods.stdout or 'ContainerCreating' in istio_system_pods.stdout"
+        fail_msg: "{{ kubeflow_pods_deployment_fail_msg }}"
+        success_msg: "{{ kubeflow_pods_deployment_success_msg }}"
+      tags: TC_016, VERIFY_016
+
+# OMNIA_UKP_US_VFKP_TC_017
+# Execute the kubeflow role on manager nodes with Kubeflow already deployed
+- name: OMNIA_UKP_US_VFKP_TC_017
+  hosts: manager
+  gather_facts: false
+  vars_files:
+    - test_vars/test_kubeflow_vars.yml
+  tasks:
+    - block:
+        - name: Call kubeflow role
+          include_role:
+            name: ../platforms/roles/kubeflow
+      tags: TC_017, VERIFY_017
+
+    - name: Checking installed Kubeflow version
+      command: kfctl version
+      register: kfctl_version
+      changed_when: false
+      ignore_errors: True
+      tags: TC_017, VERIFY_017
+
+    - name: Checking pods under kubeflow namespace
+      command: kubectl get pods --namespace kubeflow
+      register: kubeflow_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_017, VERIFY_017
+
+    - name: Checking pods under istio-system namespace
+      command: kubectl get pods --namespace istio-system
+      register: istio_system_pods
+      changed_when: false
+      ignore_errors: True
+      tags: TC_017, VERIFY_017
+
+    - name: Validating Kubeflow Installation
+      assert:
+        that:
+          - "'command not found' not in kfctl_version.stdout"
+        fail_msg: "{{ kubeflow_install_fail_msg }}"
+        success_msg: "{{ kubeflow_install_success_msg }}"
+      tags: TC_017, VERIFY_017
+
+    - name: Validating Kubeflow pods deployment
+      assert:
+        that:
+          - "'Running' in kubeflow_pods.stdout or 'ContainerCreating' in kubeflow_pods.stdout"
+          - "'Running' in istio_system_pods.stdout or 'ContainerCreating' in istio_system_pods.stdout"
+        fail_msg: "{{ kubeflow_pods_deployment_fail_msg }}"
+        success_msg: "{{ kubeflow_pods_deployment_success_msg }}"
+      tags: TC_017, VERIFY_017
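
One caveat on the kfctl assertions in both plays: when the binary is absent, Ansible's command module fails with a "No such file or directory" error rather than writing "command not found" to stdout, so the substring check can pass vacuously under ignore_errors. A hedged variant (a sketch) asserts on the registered return code instead:

    - name: Validate kfctl availability via return code (sketch)
      assert:
        that:
          - kfctl_version.rc == 0
        fail_msg: "{{ kubeflow_install_fail_msg }}"
        success_msg: "{{ kubeflow_install_success_msg }}"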

+ 22 - 0
test/test_vars/test_jupyterhub_vars.yml

@@ -0,0 +1,22 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+jupyterhub_pods_fail_msg: "JupyterHub pods are not deployed"
+
+jupyterhub_pods_success_msg: "JupyterHub pods are deployed and running"
+
+jupyterhub_services_fail_msg: "JupyterHub services are not running"
+
+jupyterhub_services_success_msg: "JupyterHub services are running"

+ 34 - 0
test/test_vars/test_k8s_common_vars.yml

@@ -0,0 +1,34 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+common_packages:
+  - docker
+  - kubelet
+
+k8_packages:
+  - kubeadm
+  - kubectl
+
+packages_status_success_msg: "Common & K8s packages are installed"
+
+packages_status_fail_msg: "Common & K8s packages are not installed"
+
+docker_service_fail_msg: "Docker service is not running"
+
+docker_service_success_msg: "Docker service is running"
+
+kubelet_service_fail_msg: "Kubelet service is not running"
+
+kubelet_service_success_msg: "Kubelet service is running"
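
For reference, a minimal sketch of how a test could consume these lists (the rpm query loop is an illustration, not necessarily what test_k8s_common.yml does):

    - name: Query each expected package (sketch)
      command: rpm -q {{ item }}
      loop: "{{ common_packages + k8_packages }}"
      register: package_check
      changed_when: false
      ignore_errors: true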

+ 38 - 0
test/test_vars/test_k8s_firewalld_vars.yml

@@ -0,0 +1,38 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+manager_k8s_ports_status_fail_msg: "Kubernetes ports are not open on the manager node"
+
+manager_k8s_ports_status_success_msg: "Kubernetes ports are open on the manager node"
+
+compute_k8s_ports_status_fail_msg: "Kubernetes ports are not open on the compute nodes"
+
+compute_k8s_ports_status_success_msg: "Kubernetes ports are open on the compute nodes"
+
+calico_ports_manager_fail_msg: "Calico ports are not open on the manager node"
+
+calico_ports_manager_success_msg: "Calico ports are open on the manager node"
+
+calico_ports_compute_fail_msg: "Calico ports are not open on the compute nodes"
+
+calico_ports_compute_success_msg: "Calico ports are open on the compute nodes"
+
+flannel_ports_manager_fail_msg: "Flannel ports are not open on the manager node"
+
+flannel_ports_manager_success_msg: "Flannel ports are open on the manager node"
+
+flannel_ports_compute_fail_msg: "Flannel ports are not open on the compute nodes"
+
+flannel_ports_compute_success_msg: "Flannel ports are open on the compute nodes"
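
A sketch of a check these messages could report on (it assumes the firewalld role opens ports with firewall-cmd --add-port; 6443/tcp is the Kubernetes API server port):

    - name: List ports opened by firewalld (sketch)
      command: firewall-cmd --list-ports
      register: open_ports
      changed_when: false

    - name: Validate the Kubernetes API port on the manager node (sketch)
      assert:
        that:
          - "'6443/tcp' in open_ports.stdout"
        fail_msg: "{{ manager_k8s_ports_status_fail_msg }}"
        success_msg: "{{ manager_k8s_ports_status_success_msg }}"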

+ 17 - 0
test/test_vars/test_k8s_manager_vars.yml

@@ -0,0 +1,17 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+helm_status_fail_msg: "Helm is not installed"
+
+helm_status_success_msg: "Helm is installed"
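
A sketch of a check that could consume these messages (helm version --short is valid on both Helm 2 and Helm 3):

    - name: Check Helm installation (sketch)
      command: helm version --short
      register: helm_version
      changed_when: false
      ignore_errors: true

    - name: Validate Helm availability (sketch)
      assert:
        that:
          - helm_version.rc == 0
        fail_msg: "{{ helm_status_fail_msg }}"
        success_msg: "{{ helm_status_success_msg }}"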

+ 38 - 0
test/test_vars/test_k8s_start_manager_workers_vars.yml

@@ -0,0 +1,38 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+master_node_status_fail_msg: "Master Node is not configured"
+
+master_node_status_success_msg: "Master Node is configured and initialized successfully"
+
+controller_scheduler_status_fail_msg: "Static Pod manifests (controller-manager and scheduler) are not deployed"
+
+controller_scheduler_status_success_msg: "Static Pod manifests (controller-manager and scheduler) are deployed and running"
+
+coredns_status_fail_msg: "CoreDNS pods are not deployed"
+
+coredns_status_success_msg: "CoreDNS pods are deployed and running"
+
+calico_flannel_status_fail_msg: "Calico/Flannel SDN network is not deployed"
+
+calico_flannel_status_success_msg: "Calico/Flannel SDN network is deployed and running"
+
+k8s_service_account_status_fail_msg: "Kubernetes dashboard service account and token are not created"
+
+k8s_service_account_status_success_msg: "Kubernetes dashboard service account and token are created"
+
+worker_nodes_status_fail_msg: "Worker Nodes are not initialized"
+
+worker_nodes_status_success_msg: "Worker Nodes are initialized and joined to the cluster"
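
For the master-node messages, a sketch of the matching check (the selector is the inverse of the worker query used in test_k8s_start_manager_workers.yml):

    - name: Check master node status (sketch)
      command: kubectl get node --selector='node-role.kubernetes.io/master'
      register: master_node_status
      changed_when: false

    - name: Validate master node status (sketch)
      assert:
        that:
          - "'Ready' in master_node_status.stdout"
        fail_msg: "{{ master_node_status_fail_msg }}"
        success_msg: "{{ master_node_status_success_msg }}"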

+ 38 - 0
test/test_vars/test_k8s_start_services_vars.yml

@@ -0,0 +1,38 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+metallb_prometheus_mpi_pods_fail_msg: "Metallb, Prometheus and MPI pods are not deployed or not running"
+
+metallb_prometheus_mpi_pods_success_msg: "Metallb, Prometheus and MPI pods are deployed and running"
+
+kubernetes_dashboard_fail_msg: "Kubernetes Dashboard is not deployed"
+
+kubernetes_dashboard_success_msg: "Kubernetes Dashboard is deployed"
+
+nfs_client_provisioner_pods_fail_msg: "NFS Client Provisioner pod is not deployed"
+
+nfs_client_provisioner_pods_success_msg: "NFS Client Provisioner pod is deployed and running"
+
+node_feature_discovery_pods_fail_msg: "Node Feature Discovery pods are not deployed"
+
+node_feature_discovery_pods_success_msg: "Node Feature Discovery pods are deployed and running"
+
+nvidia_device_plugin_pods_fail_msg: "Nvidia Device Plugin pod is not deployed or not running"
+
+nvidia_device_plugin_pods_success_msg: "Nvidia Device Plugin pod is deployed and running"
+
+default_storage_class_fail_msg: "NFS Client Provisioner is not configured as default storage class"
+
+default_storage_class_success_msg: "NFS Client Provisioner is configured as default storage class"
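
Note that the substring check in test_k8s_start_services.yml passes whenever an nfs-client class exists, even if it is not the default. kubectl marks the default class with "(default)" in the NAME column, so a tighter sketch is:

    - name: Fetch the nfs-client storage class line (sketch)
      shell: kubectl get sc | grep nfs-client
      register: nfs_storage_class
      changed_when: false
      ignore_errors: true

    - name: Validate that nfs-client is marked default (sketch)
      assert:
        that:
          - "'(default)' in nfs_storage_class.stdout"
        fail_msg: "{{ default_storage_class_fail_msg }}"
        success_msg: "{{ default_storage_class_success_msg }}"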

+ 22 - 0
test/test_vars/test_kubeflow_vars.yml

@@ -0,0 +1,22 @@
+#  Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+---
+
+kubeflow_install_fail_msg: "Kubeflow (kfctl) is not installed"
+
+kubeflow_install_success_msg: "Kubeflow (kfctl) is installed"
+
+kubeflow_pods_deployment_fail_msg: "Kubeflow pods are not deployed"
+
+kubeflow_pods_deployment_success_msg: "Kubeflow pods are deployed"